diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 385c8abd2..000000000 --- a/.dockerignore +++ /dev/null @@ -1,5 +0,0 @@ -**/*.sw* -.tox -.git -**/__pycache__ -.pipenv diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index cd7d8db33..000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - -This is a: **FILL ME IN** - - -## Details - - -## Related Issues -- [ ] I have searched this repository/Pi-hole forums for existing issues and pull requests that look similar - - - - - -## How to reproduce the issue - -1. Environment data - * Operating System: **ENTER HERE** - * Hardware: - * Kernel Architecture: - * Docker Install Info and version: - - Software source: - - Supplimentary Software: - * Hardware architecture: - -2. docker-compose.yml contents, docker run shell command, or paste a screenshot of any UI based configuration of containers here -3. any additional info to help reproduce - - -## These common fixes didn't work for my issue - -- [ ] I have tried removing/destroying my container, and re-creating a new container -- [ ] I have tried fresh volume data by backing up and moving/removing the old volume data -- [ ] I have tried running the stock `docker run` example(s) in the readme (removing any customizations I added) -- [ ] I have tried a newer or older version of Docker Pi-hole (depending what version the issue started in for me) -- [ ] I have tried running without my volume data mounts to eliminate volumes as the cause - -If the above debugging / fixes revealed any new information note it here. -Add any other debugging steps you've taken or theories on root cause that may help. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 964513e0a..000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,26 +0,0 @@ - - -## Description - - -## Motivation and Context - - - -## How Has This Been Tested? 
- - - - -## Types of changes - -- [ ] Bug fix (non-breaking change which fixes an issue) -- [ ] New feature (non-breaking change which adds functionality) -- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - -## Checklist: - - -- [ ] My code follows the code style of this project. -- [ ] My change requires a change to the documentation. -- [ ] I have updated the documentation accordingly. diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..5094281ba --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,174 @@ +name: build + +on: + push: + branches: + - master + - features/* + - releases/* + - development + tags: + - v* + +# For GitHub-hosted runners, each job in a workflow runs in a fresh instance of +# a virtual environment. +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: checkout repo + uses: actions/checkout@2036a08e25fa78bbd946711a407b529a0a1204bf #v2.3.2 + + - name: setup docker buildx + id: buildx + uses: crazy-max/ghaction-docker-buildx@bb77f35f7a82f54fcda51000ea4e4467825014fd #v3.3.0 + with: + qemu-version: latest + buildx-version: latest + + - name: setup cache + id: cache + uses: actions/cache@5ca27f25cb3a0babe750cad7e4fddd3e55f29e9a #v2.1.1 + with: + key: ${{ runner.os }}-buildx-${{ github.sha }} + path: /tmp/buildx-cache + restore-keys: ${{ runner.os }}-buildx- + + - name: configure build + id: config + env: + GITHUB_REPO: ${{ github.repository }} + run: | + if [[ $GITHUB_REF == refs/tags/v* ]]; then + # Tagged release + echo ::set-output name=version::${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/master ]]; then + # Master branch (next step is tag for release) + echo ::set-output name=version::latest + elif [[ $GITHUB_REF == refs/heads/releases/* ]]; then + # Release candidate branch (next step is merge into master) + echo ::set-output name=version::${GITHUB_REF#refs/heads/releases/}-rc + elif [[ $GITHUB_REF == 
refs/heads/development ]]; then + # Development branch (next step is fork or merge into release candidate branch) + echo ::set-output name=version::edge + elif [[ $GITHUB_REF == refs/heads/features/* ]]; then + # Feature branch (next step is to merge into development branch) + echo ::set-output name=version::edge-${GITHUB_REF#refs/heads/features/} + else + echo "\$GITHUB_REF ($GITHUB_REF) did not match supported patterns" + exit 1 + fi + + echo ::set-output name=date::$(date -u +'%Y-%m-%dT%H:%M:%SZ') + echo ::set-output name=gitsha::${GITHUB_SHA::8} + + # Temporary workaround (don't build linux/arm/v7) https://github.com/moby/moby/issues/41017 + echo ::set-output name=docker_platforms::linux/amd64,linux/arm/v6,linux/arm64 + echo ::set-output name=docker_image::${GITHUB_REPO##*/docker-} + + echo ::set-output name=pihole_core_version::$(grep "PIHOLE_CORE_VERSION=" Dockerfile | head -1 | grep -o "v.*") + echo ::set-output name=pihole_ftl_version::$( grep "PIHOLE_FTL_VERSION=" Dockerfile | head -1 | grep -o "v.*") + echo ::set-output name=pihole_web_version::$( grep "PIHOLE_WEB_VERSION=" Dockerfile | head -1 | grep -o "v.*") + + - name: show configuration + run: | + echo date: ${{ steps.config.outputs.date }} + echo gitsha: ${{ steps.config.outputs.gitsha }} + echo version: ${{ steps.config.outputs.version }} + echo docker_image: ${{ steps.config.outputs.docker_image }} + echo docker_platforms: ${{ steps.config.outputs.docker_platforms }} + echo pihole_ftl_version: ${{ steps.config.outputs.pihole_ftl_version }} + echo pihole_web_version: ${{ steps.config.outputs.pihole_web_version }} + echo pihole_core_version: ${{ steps.config.outputs.pihole_core_version }} + + - name: build docker image + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + run: | + docker buildx build \ + --output "type=image,push=false" \ + --cache-to "type=local,dest=/tmp/buildx-cache" \ + --cache-from "type=local,src=/tmp/buildx-cache" \ + --platform "${{ 
steps.config.outputs.docker_platforms }}" \ + --tag "$DOCKERHUB_USERNAME/${{ steps.config.outputs.docker_image }}:${{ steps.config.outputs.version }}" \ + . + + - name: cleanup + run: | + rm -f "$HOME/.docker/config.json" + + outputs: + date: ${{ steps.config.outputs.date }} + gitsha: ${{ steps.config.outputs.gitsha }} + version: ${{ steps.config.outputs.version }} + docker_image: ${{ steps.config.outputs.docker_image }} + docker_platforms: ${{ steps.config.outputs.docker_platforms }} + pihole_ftl_version: ${{ steps.config.outputs.pihole_ftl_version }} + pihole_web_version: ${{ steps.config.outputs.pihole_web_version }} + pihole_core_version: ${{ steps.config.outputs.pihole_core_version }} + + publish: + runs-on: ubuntu-latest + needs: build + steps: + - name: checkout repo + uses: actions/checkout@2036a08e25fa78bbd946711a407b529a0a1204bf #v2.3.2 + + - name: setup docker buildx + id: buildx + uses: crazy-max/ghaction-docker-buildx@bb77f35f7a82f54fcda51000ea4e4467825014fd #v3.3.0 + with: + qemu-version: latest + buildx-version: latest + + - name: setup cache + id: cache + uses: actions/cache@5ca27f25cb3a0babe750cad7e4fddd3e55f29e9a #v2.1.1 + with: + key: ${{ runner.os }}-buildx-${{ github.sha }} + path: /tmp/buildx-cache + restore-keys: ${{ runner.os }}-buildx- + + - name: docker hub login + if: ${{ success() }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + run: | + echo "$DOCKERHUB_PASSWORD" | docker login --username "$DOCKERHUB_USERNAME" --password-stdin + + - name: push docker image + if: ${{ success() }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + run: | + docker buildx build \ + --output "type=image,push=true" \ + --cache-from "type=local,src=/tmp/buildx-cache" \ + --platform "${{ needs.build.outputs.docker_platforms }}" \ + --tag "$DOCKERHUB_USERNAME/${{ needs.build.outputs.docker_image }}:${{ needs.build.outputs.version }}" \ + --label
"org.opencontainers.image.created=${{ needs.build.outputs.date }}" \ + --label "org.opencontainers.image.revision=${{ needs.build.outputs.gitsha }}" \ + --label "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --label "org.opencontainers.image.version=${{ needs.build.outputs.version }}" \ + --label "org.opencontainers.image.title=${{ needs.build.outputs.docker_image }}" \ + . + + - name: create release + uses: actions/create-release@1e92f6fc665e271a5435631ba00711fdd83d9d27 #v1.1.3 + if: ${{ success() && startsWith(github.ref, 'refs/tags/') }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref }} + release_name: Release ${{ github.ref }} + body: | + Core **[${{ needs.build.outputs.pihole_core_version }}](https://github.com/pi-hole/pi-hole/releases/tag/${{ needs.build.outputs.pihole_core_version }})** + FTLDNS **[${{ needs.build.outputs.pihole_ftl_version }}](https://github.com/pi-hole/FTL/releases/tag/${{ needs.build.outputs.pihole_ftl_version }})** + Web interface **[${{ needs.build.outputs.pihole_web_version }}](https://github.com/pi-hole/AdminLTE/releases/tag/${{ needs.build.outputs.pihole_web_version }})** + draft: true + prerelease: false + + - name: cleanup + run: | + rm -f "$HOME/.docker/config.json" diff --git a/.github/workflows/test-and-build.yaml b/.github/workflows/test-and-build.yaml deleted file mode 100644 index 24e7e7b5d..000000000 --- a/.github/workflows/test-and-build.yaml +++ /dev/null @@ -1,65 +0,0 @@ -name: Test & Build -on: - push: - branches: - - master - - dev - - v* - - beta-v* - - release/* - tags: - - v* - pull_request: - -#env: -# DOCKER_HUB_REPO: pihole - -jobs: - test-and-build: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - ARCH: [amd64, armhf, arm64] - DEBIAN_VERSION: [stretch, buster] - env: - ARCH: ${{matrix.ARCH}} - DEBIAN_VERSION: ${{matrix.DEBIAN_VERSION}} - steps: - - name: Checkout Repo - uses: actions/checkout@v2 - - name: Run Tests - run: | - echo
"Building ${ARCH}-${DEBIAN_VERSION}" - ./gh-actions-test.sh - - name: Push the ARCH image - if: github.event_name != 'pull_request' - run: | - . gh-actions-vars.sh - echo "${{ secrets.DOCKERHUB_PASS }}" | docker login --username="${{ secrets.DOCKERHUB_USER }}" --password-stdin - docker push "${ARCH_IMAGE}" - - name: Upload gh-workspace - if: github.event_name != 'pull_request' - uses: actions/upload-artifact@v1 - with: - name: gh-workspace - path: .gh-workspace - - publish: - if: github.event_name != 'pull_request' - runs-on: ubuntu-latest - needs: test-and-build - steps: - - name: Checkout Repo - uses: actions/checkout@v2 - - name: Download workspace files - uses: actions/download-artifact@v1 - with: - name: gh-workspace - path: .gh-workspace - - name: Tag and Publish multi-arch images - env: - DOCKERHUB_PASS: ${{ secrets.DOCKERHUB_PASS }} - DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} - run: | - ./gh-actions-deploy.sh diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e69de29bb..000000000 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index bf39b276b..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# Pull Request Guidelines - -Please review the following before opening a pull request (PR) to help your PR go smoothly: - -* Code changes go to the `dev` branch first - * To ensure proper testing and quality control, target any code change pull requests against `dev` branch. - -* Make sure the tests pass - * Take a look at [TESTING.md](TESTING.md) to see how to run tests locally so you do not have to push all your code to a PR and have travis-ci run it. - * Your tests will probably run faster locally and you get a faster feedback loop. 
diff --git a/Dockerfile b/Dockerfile index 9fff81c89..43f463dfd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,58 +1,47 @@ -ARG PIHOLE_BASE -FROM $PIHOLE_BASE - -ARG PIHOLE_ARCH -ENV PIHOLE_ARCH "${PIHOLE_ARCH}" -ARG S6_ARCH -ARG S6_VERSION -ENV S6OVERLAY_RELEASE "https://github.com/just-containers/s6-overlay/releases/download/${S6_VERSION}/s6-overlay-${S6_ARCH}.tar.gz" - -COPY install.sh /usr/local/bin/install.sh -COPY VERSION /etc/docker-pi-hole-version -ENV PIHOLE_INSTALL /root/ph_install.sh - -RUN bash -ex install.sh 2>&1 && \ - rm -rf /var/cache/apt/archives /var/lib/apt/lists/* - -ENTRYPOINT [ "/s6-init" ] - -ADD s6/debian-root / -COPY s6/service /usr/local/bin/service - -# php config start passes special ENVs into -ARG PHP_ENV_CONFIG -ENV PHP_ENV_CONFIG "${PHP_ENV_CONFIG}" -ARG PHP_ERROR_LOG -ENV PHP_ERROR_LOG "${PHP_ERROR_LOG}" -COPY ./start.sh / -COPY ./bash_functions.sh / - -# IPv6 disable flag for networks/devices that do not support it -ENV IPv6 True - -EXPOSE 53 53/udp -EXPOSE 67/udp -EXPOSE 80 -EXPOSE 443 - -ENV S6_LOGGING 0 -ENV S6_KEEP_ENV 1 -ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2 - -ENV ServerIP 0.0.0.0 -ENV FTL_CMD no-daemon -ENV DNSMASQ_USER root - -ARG PIHOLE_VERSION -ENV VERSION "${PIHOLE_VERSION}" -ENV PATH /opt/pihole:${PATH} - -ARG NAME -LABEL image="${NAME}:${PIHOLE_VERSION}_${PIHOLE_ARCH}" -ARG MAINTAINER -LABEL maintainer="${MAINTAINER}" -LABEL url="https://www.github.com/pi-hole/docker-pi-hole" - -HEALTHCHECK CMD dig +norecurse +retry=0 @127.0.0.1 pi.hole || exit 1 - -SHELL ["/bin/bash", "-c"] +FROM debian:buster-slim + +# Build time environment +ARG TARGETPLATFORM +ARG S6_VERSION=v1.22.1.0 +ARG PIHOLE_CORE_VERSION=v5.1.2 +ARG PIHOLE_WEB_VERSION=v5.1.1 +ARG PIHOLE_FTL_VERSION=v5.2 +ARG DEBIAN_FRONTEND=noninteractive +ARG PIHOLE_SKIP_OS_CHECK=true + +RUN apt-get update +RUN apt-get install --no-install-recommends -y \ + curl procps ca-certificates netcat-openbsd + +# curl in armhf-buster's image has SSL issues. Running c_rehash fixes it. 
+# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=923479 +RUN c_rehash + +COPY Dockerfile.sh / +COPY root/ / +RUN touch /.dockerenv + +# Hard to track down issue: COPY uses the file permissions from the working dir, +# which ought to be 0755... but when they're 0700, we see strange errors about +# hostname lookups failing. Git only tracks the executable bit, not read/write. +RUN chmod 755 /etc /usr /usr/bin /usr/bin/* + +# Runtime container environment +ENV S6_LOGGING=0 \ + S6_KEEP_ENV=1 \ + S6_BEHAVIOUR_IF_STAGE2_FAILS=2 \ + PIHOLE_DNS_USER=pihole \ + PATH=/opt/pihole:${PATH} + +RUN /Dockerfile.sh && \ + rm -rf /Dockerfile.sh /var/cache/apt/archives /var/lib/apt/lists/* + +EXPOSE 53/udp +EXPOSE 53/tcp +EXPOSE 80/tcp +EXPOSE 443/tcp + +SHELL ["/bin/bash", "-c"] +WORKDIR / +ENTRYPOINT [ "/s6-init" ] +HEALTHCHECK CMD dig +norecurse +retry=0 @127.0.0.1 localhost diff --git a/Dockerfile.py b/Dockerfile.py deleted file mode 100755 index 44efac4d8..000000000 --- a/Dockerfile.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 -""" Dockerfile.py - generates and build dockerfiles - -Usage: - Dockerfile.py [--hub_tag=] [--arch= ...] [--debian= ...] 
[-v] [-t] [--no-build] [--no-cache] [--fail-fast] - -Options: - --no-build Skip building the docker images - --no-cache Build without using any cache data - --fail-fast Exit on first build error - --hub_tag= What the Docker Hub Image should be tagged as [default: None] - --arch= What Architecture(s) to build [default: amd64 armel armhf arm64] - --debian= What debian version(s) to build [default: stretch buster] - -v Print docker's command output [default: False] - -t Print docker's build time [default: False] - -Examples: -""" -from docopt import docopt -import os -import sys -import subprocess - -__version__ = None -dot = os.path.abspath('.') -with open('{}/VERSION'.format(dot), 'r') as v: - raw_version = v.read().strip() - __version__ = raw_version.replace('release/', 'release-') - - -def build_dockerfiles(args) -> bool: - all_success = True - if args['-v']: - print(args) - if args['--no-build']: - print(" ::: Skipping Dockerfile building") - return all_success - - for arch in args['--arch']: - for debian_version in args['--debian']: - all_success = build('pihole', arch, debian_version, args['--hub_tag'], args['-t'], args['--no-cache'], args['-v']) and all_success - if not all_success and args['--fail-fast']: - return False - return all_success - - -def run_and_stream_command_output(command, environment_vars, verbose) -> bool: - print("Running", command) - build_result = subprocess.Popen(command.split(), env=environment_vars, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) - if verbose: - while build_result.poll() is None: - for line in build_result.stdout: - print(line, end='') - build_result.wait() - if build_result.returncode != 0: - print(" ::: Error running".format(command)) - print(build_result.stderr) - return build_result.returncode == 0 - - -def build(docker_repo: str, arch: str, debian_version: str, hub_tag: str, show_time: bool, no_cache: bool, verbose: bool) -> bool: - create_tag = 
f'{docker_repo}:{__version__}-{arch}-{debian_version}' - print(f' ::: Building {create_tag}') - time_arg = 'time' if show_time else '' - cache_arg = '--no-cache' if no_cache else '' - build_env = os.environ.copy() - build_env['PIHOLE_VERSION'] = __version__ - build_env['DEBIAN_VERSION'] = debian_version - build_command = f'{time_arg} docker-compose -f build.yml build {cache_arg} --pull {arch}' - print(f' ::: Building {arch} into {create_tag}') - success = run_and_stream_command_output(build_command, build_env, verbose) - if verbose: - print(build_command, '\n') - if success and hub_tag: - hub_tag_command = f'{time_arg} docker tag {create_tag} {hub_tag}' - print(f' ::: Tagging {create_tag} into {hub_tag}') - success = run_and_stream_command_output(hub_tag_command, build_env, verbose) - return success - - -if __name__ == '__main__': - args = docopt(__doc__, version='Dockerfile 1.1') - success = build_dockerfiles(args) - exit_code = 0 if success else 1 - sys.exit(exit_code) diff --git a/Dockerfile.sh b/Dockerfile.sh index 76ae6e039..e97ac0644 100755 --- a/Dockerfile.sh +++ b/Dockerfile.sh @@ -1,13 +1,64 @@ -#!/usr/bin/env bash +#!/bin/bash -eux -# @param ${ARCH} The architecture to build. Example: amd64 -# @param ${DEBIAN_VERSION} The debian version to build. Example: buster -# @param ${ARCH_IMAGE} What the Docker Hub Image should be tagged as [default: None] +# +# Download web installer script, prepare environment for unattended install, and run it +# +mkdir -p /etc/pihole +mkdir -p /var/run/pihole -set -eux -./Dockerfile.py -v --no-cache --arch="${ARCH}" --debian="${DEBIAN_VERSION}" --hub_tag="${ARCH_IMAGE}" -docker images +# Without this, debconf will try to talk to us +ln -s `which echo` /usr/local/bin/whiptail -# TODO: Add junitxml output and have something consume it -# 2 parallel max b/c race condition with docker fixture (I think?) 
-py.test -vv -n 2 -k "${ARCH}" ./test/ +# debconf-apt-progress seems to hang so get rid of it too +mv -f "$(which debconf-apt-progress)"{,.disabled} + +case "$TARGETPLATFORM" in + *386) S6_ARCH=x86 ;; + *arm64) S6_ARCH=aarch64 ;; + *arm) S6_ARCH=arm ;; # armhf + *arm/v7) S6_ARCH=arm ;; # armhf + *arm/v6) S6_ARCH=arm ;; # armel + *amd64) S6_ARCH=amd64 ;; + *) >&2 echo "unhandled case: ${TARGETPLATFORM}"; exit 1 ;; +esac + +S6OVERLAY_RELEASE="https://github.com/just-containers/s6-overlay/releases/download/${S6_VERSION}/s6-overlay-${S6_ARCH}.tar.gz" +curl -4 -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / +mv /init /s6-init + +# Get the install functions +curl -o /install.sh \ + "https://raw.githubusercontent.com/pi-hole/pi-hole/${PIHOLE_CORE_VERSION}/automated%20install/basic-install.sh" + +{ echo "PIHOLE_INTERFACE=eth0" + echo "IPV4_ADDRESS=0.0.0.0" + echo "IPV6_ADDRESS=::" + echo "PIHOLE_DNS_1=1.1.1.1" + echo "PIHOLE_DNS_2=1.0.0.1" + echo "INSTALL_WEB_SERVER=true" + echo "INSTALL_WEB_INTERFACE=true" + echo "LIGHTTPD_ENABLED=true" +} >> /etc/pihole/setupVars.conf +source /etc/pihole/setupVars.conf + +# Fix permission denied to resolvconf post-inst /etc/resolv.conf moby/moby issue #1297 +echo resolvconf resolvconf/linkify-resolvconf boolean false | debconf-set-selections +echo "${PIHOLE_FTL_VERSION}" > /etc/pihole/ftlbranch + +# FIRE IN THE HOLE +bash -ex /install.sh --unattended + +git_reset() { + pushd "$1"; git reset --hard "$2"; popd +} + +# Seems installer fetches from `master` branch in each repo +git_reset "/etc/.pihole" "$PIHOLE_CORE_VERSION" +git_reset "/var/www/html/admin" "$PIHOLE_WEB_VERSION" + +sed -i 's/readonly //g' /opt/pihole/webpage.sh + +# Replace the call to `updatePiholeFunc` in arg parse with new `unsupportedFunc` +sed -i $'s/helpFunc() {/unsupportedFunc() {\\\n echo "Function not supported in Docker images"\\\n exit 0\\\n}\\\n\\\nhelpFunc() {/g' /usr/local/bin/pihole +sed -i $'s/)\s*updatePiholeFunc/) unsupportedFunc/g' /usr/local/bin/pihole +echo 
Docker install successful diff --git a/Dockerfile_build b/Dockerfile_build deleted file mode 100644 index 70e5a2814..000000000 --- a/Dockerfile_build +++ /dev/null @@ -1,25 +0,0 @@ -FROM python:buster - -# Only works for docker CLIENT (bind mounted socket) -COPY --from=docker:18.09.3 /usr/local/bin/docker /usr/local/bin/ - -ARG packages -RUN apt-get update && \ - apt-get install -y python3-dev curl gcc make \ - libffi-dev libssl-dev ${packages} \ - && pip3 install -U pip pipenv - -RUN curl -L https://github.com/docker/compose/releases/download/1.25.5/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose && \ - chmod +x /usr/local/bin/docker-compose - -COPY ./Dockerfile.sh /usr/local/bin/ -COPY Pipfile* /root/ -WORKDIR /root - -RUN pipenv install --system \ - && sed -i 's|/bin/sh|/bin/bash|g' /usr/local/lib/python3.8/site-packages/testinfra/backend/docker.py - -RUN echo "set -ex && Dockerfile.sh && \$@" > /usr/local/bin/entrypoint.sh -RUN chmod +x /usr/local/bin/entrypoint.sh -ENTRYPOINT entrypoint.sh -CMD Dockerfile.sh diff --git a/Pipfile b/Pipfile deleted file mode 100644 index 4aadaeac7..000000000 --- a/Pipfile +++ /dev/null @@ -1,63 +0,0 @@ -[[source]] -name = "pypi" -url = "https://pypi.org/simple" -verify_ssl = true - -[dev-packages] - -[packages] -apipkg = "==1.5" -atomicwrites = "==1.3.0" -attrs = "==19.3.0" -bcrypt = "==3.1.7" -cached-property = "==1.5.1" -certifi = "==2019.11.28" -cffi = "==1.13.2" -chardet = "==3.0.4" -configparser = "==4.0.2" -contextlib2 = "==0.6.0.post1" -coverage = "==5.0.1" -cryptography = "==2.8" -docker = "==4.1.0" -dockerpty = "==0.4.1" -docopt = "==0.6.2" -enum34 = "==1.1.6" -execnet = "==1.7.1" -filelock = "==3.0.12" -funcsigs = "==1.0.2" -idna = "==2.8" -importlib-metadata = "==1.3.0" -ipaddress = "==1.0.23" -jsonschema = "==3.2.0" -more-itertools = "==5.0.0" -pathlib2 = "==2.3.5" -pluggy = "==0.13.1" -py = "==1.8.1" -pycparser = "==2.19" -pyparsing = "==2.4.6" -pyrsistent = "==0.15.6" -pytest = "==4.6.8" 
-pytest-cov = "==2.8.1" -pytest-forked = "==1.1.3" -pytest-xdist = "==1.31.0" -requests = "==2.22.0" -scandir = "==1.10.0" -six = "==1.13.0" -subprocess32 = "==3.5.4" -testinfra = "==3.3.0" -texttable = "==1.6.2" -toml = "==0.10.0" -tox = "==3.14.3" -urllib3 = "==1.25.7" -virtualenv = "==16.7.9" -wcwidth = "==0.1.7" -zipp = "==0.6.0" -"backports.shutil_get_terminal_size" = "==1.0.0" -"backports.ssl_match_hostname" = "==3.7.0.1" -Jinja2 = "==2.10.3" -MarkupSafe = "==1.1.1" -PyYAML = "==5.2" -websocket_client = "==0.57.0" - -[requires] -python_version = "3.8" diff --git a/Pipfile.lock b/Pipfile.lock deleted file mode 100644 index 80ca574ab..000000000 --- a/Pipfile.lock +++ /dev/null @@ -1,586 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "ee7705112b315cad899e08bd6eac8f47e9a200a0d47a1920cc192995b79f8673" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "3.8" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "apipkg": { - "hashes": [ - "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6", - "sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c" - ], - "index": "pypi", - "version": "==1.5" - }, - "atomicwrites": { - "hashes": [ - "sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4", - "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6" - ], - "index": "pypi", - "version": "==1.3.0" - }, - "attrs": { - "hashes": [ - "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c", - "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72" - ], - "index": "pypi", - "version": "==19.3.0" - }, - "backports.shutil-get-terminal-size": { - "hashes": [ - "sha256:0975ba55054c15e346944b38956a4c9cbee9009391e41b86c68990effb8c1f64", - "sha256:713e7a8228ae80341c70586d1cc0a8caa5207346927e23d09dcbcaf18eadec80" - ], - "index": "pypi", - "version": "==1.0.0" - }, - 
"backports.ssl-match-hostname": { - "hashes": [ - "sha256:bb82e60f9fbf4c080eabd957c39f0641f0fc247d9a16e31e26d594d8f42b9fd2" - ], - "index": "pypi", - "version": "==3.7.0.1" - }, - "bcrypt": { - "hashes": [ - "sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89", - "sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42", - "sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294", - "sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161", - "sha256:6305557019906466fc42dbc53b46da004e72fd7a551c044a827e572c82191752", - "sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31", - "sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5", - "sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c", - "sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0", - "sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de", - "sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e", - "sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052", - "sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09", - "sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105", - "sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133", - "sha256:ce4e4f0deb51d38b1611a27f330426154f2980e66582dc5f438aad38b5f24fc1", - "sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7", - "sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc" - ], - "index": "pypi", - "version": "==3.1.7" - }, - "cached-property": { - "hashes": [ - "sha256:3a026f1a54135677e7da5ce819b0c690f156f37976f3e30c5430740725203d7f", - "sha256:9217a59f14a5682da7c4b8829deadbfc194ac22e9908ccf7c8820234e80a1504" - ], - "index": "pypi", - "version": "==1.5.1" - }, - "certifi": { - "hashes": [ - "sha256:017c25db2a153ce562900032d5bc68e9f191e44e9a0f762f373977de9df1fbb3", - 
"sha256:25b64c7da4cd7479594d035c08c2d809eb4aab3a26e5a990ea98cc450c320f1f" - ], - "index": "pypi", - "version": "==2019.11.28" - }, - "cffi": { - "hashes": [ - "sha256:0b49274afc941c626b605fb59b59c3485c17dc776dc3cc7cc14aca74cc19cc42", - "sha256:0e3ea92942cb1168e38c05c1d56b0527ce31f1a370f6117f1d490b8dcd6b3a04", - "sha256:135f69aecbf4517d5b3d6429207b2dff49c876be724ac0c8bf8e1ea99df3d7e5", - "sha256:19db0cdd6e516f13329cba4903368bff9bb5a9331d3410b1b448daaadc495e54", - "sha256:2781e9ad0e9d47173c0093321bb5435a9dfae0ed6a762aabafa13108f5f7b2ba", - "sha256:291f7c42e21d72144bb1c1b2e825ec60f46d0a7468f5346841860454c7aa8f57", - "sha256:2c5e309ec482556397cb21ede0350c5e82f0eb2621de04b2633588d118da4396", - "sha256:2e9c80a8c3344a92cb04661115898a9129c074f7ab82011ef4b612f645939f12", - "sha256:32a262e2b90ffcfdd97c7a5e24a6012a43c61f1f5a57789ad80af1d26c6acd97", - "sha256:3c9fff570f13480b201e9ab69453108f6d98244a7f495e91b6c654a47486ba43", - "sha256:415bdc7ca8c1c634a6d7163d43fb0ea885a07e9618a64bda407e04b04333b7db", - "sha256:42194f54c11abc8583417a7cf4eaff544ce0de8187abaf5d29029c91b1725ad3", - "sha256:4424e42199e86b21fc4db83bd76909a6fc2a2aefb352cb5414833c030f6ed71b", - "sha256:4a43c91840bda5f55249413037b7a9b79c90b1184ed504883b72c4df70778579", - "sha256:599a1e8ff057ac530c9ad1778293c665cb81a791421f46922d80a86473c13346", - "sha256:5c4fae4e9cdd18c82ba3a134be256e98dc0596af1e7285a3d2602c97dcfa5159", - "sha256:5ecfa867dea6fabe2a58f03ac9186ea64da1386af2159196da51c4904e11d652", - "sha256:62f2578358d3a92e4ab2d830cd1c2049c9c0d0e6d3c58322993cc341bdeac22e", - "sha256:6471a82d5abea994e38d2c2abc77164b4f7fbaaf80261cb98394d5793f11b12a", - "sha256:6d4f18483d040e18546108eb13b1dfa1000a089bcf8529e30346116ea6240506", - "sha256:71a608532ab3bd26223c8d841dde43f3516aa5d2bf37b50ac410bb5e99053e8f", - "sha256:74a1d8c85fb6ff0b30fbfa8ad0ac23cd601a138f7509dc617ebc65ef305bb98d", - "sha256:7b93a885bb13073afb0aa73ad82059a4c41f4b7d8eb8368980448b52d4c7dc2c", - 
"sha256:7d4751da932caaec419d514eaa4215eaf14b612cff66398dd51129ac22680b20", - "sha256:7f627141a26b551bdebbc4855c1157feeef18241b4b8366ed22a5c7d672ef858", - "sha256:8169cf44dd8f9071b2b9248c35fc35e8677451c52f795daa2bb4643f32a540bc", - "sha256:aa00d66c0fab27373ae44ae26a66a9e43ff2a678bf63a9c7c1a9a4d61172827a", - "sha256:ccb032fda0873254380aa2bfad2582aedc2959186cce61e3a17abc1a55ff89c3", - "sha256:d754f39e0d1603b5b24a7f8484b22d2904fa551fe865fd0d4c3332f078d20d4e", - "sha256:d75c461e20e29afc0aee7172a0950157c704ff0dd51613506bd7d82b718e7410", - "sha256:dcd65317dd15bc0451f3e01c80da2216a31916bdcffd6221ca1202d96584aa25", - "sha256:e570d3ab32e2c2861c4ebe6ffcad6a8abf9347432a37608fe1fbd157b3f0036b", - "sha256:fd43a88e045cf992ed09fa724b5315b790525f2676883a6ea64e3263bae6549d" - ], - "index": "pypi", - "version": "==1.13.2" - }, - "chardet": { - "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" - ], - "index": "pypi", - "version": "==3.0.4" - }, - "configparser": { - "hashes": [ - "sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c", - "sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df" - ], - "index": "pypi", - "version": "==4.0.2" - }, - "contextlib2": { - "hashes": [ - "sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e", - "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b" - ], - "index": "pypi", - "version": "==0.6.0.post1" - }, - "coverage": { - "hashes": [ - "sha256:0101888bd1592a20ccadae081ba10e8b204d20235d18d05c6f7d5e904a38fc10", - "sha256:04b961862334687549eb91cd5178a6fbe977ad365bddc7c60f2227f2f9880cf4", - "sha256:1ca43dbd739c0fc30b0a3637a003a0d2c7edc1dd618359d58cc1e211742f8bd1", - "sha256:1cbb88b34187bdb841f2599770b7e6ff8e259dc3bb64fc7893acf44998acf5f8", - "sha256:232f0b52a5b978288f0bbc282a6c03fe48cd19a04202df44309919c142b3bb9c", - 
"sha256:24bcfa86fd9ce86b73a8368383c39d919c497a06eebb888b6f0c12f13e920b1a", - "sha256:25b8f60b5c7da71e64c18888f3067d5b6f1334b9681876b2fb41eea26de881ae", - "sha256:2714160a63da18aed9340c70ed514973971ee7e665e6b336917ff4cca81a25b1", - "sha256:2ca2cd5264e84b2cafc73f0045437f70c6378c0d7dbcddc9ee3fe192c1e29e5d", - "sha256:2cc707fc9aad2592fc686d63ef72dc0031fc98b6fb921d2f5395d9ab84fbc3ef", - "sha256:348630edea485f4228233c2f310a598abf8afa5f8c716c02a9698089687b6085", - "sha256:40fbfd6b044c9db13aeec1daf5887d322c710d811f944011757526ef6e323fd9", - "sha256:46c9c6a1d1190c0b75ec7c0f339088309952b82ae8d67a79ff1319eb4e749b96", - "sha256:591506e088901bdc25620c37aec885e82cc896528f28c57e113751e3471fc314", - "sha256:5ac71bba1e07eab403b082c4428f868c1c9e26a21041436b4905c4c3d4e49b08", - "sha256:5f622f19abda4e934938e24f1d67599249abc201844933a6f01aaa8663094489", - "sha256:65bead1ac8c8930cf92a1ccaedcce19a57298547d5d1db5c9d4d068a0675c38b", - "sha256:7362a7f829feda10c7265b553455de596b83d1623b3d436b6d3c51c688c57bf6", - "sha256:7f2675750c50151f806070ec11258edf4c328340916c53bac0adbc465abd6b1e", - "sha256:960d7f42277391e8b1c0b0ae427a214e1b31a1278de6b73f8807b20c2e913bba", - "sha256:a50b0888d8a021a3342d36a6086501e30de7d840ab68fca44913e97d14487dc1", - "sha256:b7dbc5e8c39ea3ad3db22715f1b5401cd698a621218680c6daf42c2f9d36e205", - "sha256:bb3d29df5d07d5399d58a394d0ef50adf303ab4fbf66dfd25b9ef258effcb692", - "sha256:c0fff2733f7c2950f58a4fd09b5db257b00c6fec57bf3f68c5bae004d804b407", - "sha256:c792d3707a86c01c02607ae74364854220fb3e82735f631cd0a345dea6b4cee5", - "sha256:c90bda74e16bcd03861b09b1d37c0a4158feda5d5a036bb2d6e58de6ff65793e", - "sha256:cfce79ce41cc1a1dc7fc85bb41eeeb32d34a4cf39a645c717c0550287e30ff06", - "sha256:eeafb646f374988c22c8e6da5ab9fb81367ecfe81c70c292623373d2a021b1a1", - "sha256:f425f50a6dd807cb9043d15a4fcfba3b5874a54d9587ccbb748899f70dc18c47", - "sha256:fcd4459fe35a400b8f416bc57906862693c9f88b66dc925e7f2a933e77f6b18b", - "sha256:ff3936dd5feaefb4f91c8c1f50a06c588b5dc69fba4f7d9c79a6617ad80bb7df" 
- ], - "index": "pypi", - "version": "==5.0.1" - }, - "cryptography": { - "hashes": [ - "sha256:02079a6addc7b5140ba0825f542c0869ff4df9a69c360e339ecead5baefa843c", - "sha256:1df22371fbf2004c6f64e927668734070a8953362cd8370ddd336774d6743595", - "sha256:369d2346db5934345787451504853ad9d342d7f721ae82d098083e1f49a582ad", - "sha256:3cda1f0ed8747339bbdf71b9f38ca74c7b592f24f65cdb3ab3765e4b02871651", - "sha256:44ff04138935882fef7c686878e1c8fd80a723161ad6a98da31e14b7553170c2", - "sha256:4b1030728872c59687badcca1e225a9103440e467c17d6d1730ab3d2d64bfeff", - "sha256:58363dbd966afb4f89b3b11dfb8ff200058fbc3b947507675c19ceb46104b48d", - "sha256:6ec280fb24d27e3d97aa731e16207d58bd8ae94ef6eab97249a2afe4ba643d42", - "sha256:7270a6c29199adc1297776937a05b59720e8a782531f1f122f2eb8467f9aab4d", - "sha256:73fd30c57fa2d0a1d7a49c561c40c2f79c7d6c374cc7750e9ac7c99176f6428e", - "sha256:7f09806ed4fbea8f51585231ba742b58cbcfbfe823ea197d8c89a5e433c7e912", - "sha256:90df0cc93e1f8d2fba8365fb59a858f51a11a394d64dbf3ef844f783844cc793", - "sha256:971221ed40f058f5662a604bd1ae6e4521d84e6cad0b7b170564cc34169c8f13", - "sha256:a518c153a2b5ed6b8cc03f7ae79d5ffad7315ad4569b2d5333a13c38d64bd8d7", - "sha256:b0de590a8b0979649ebeef8bb9f54394d3a41f66c5584fff4220901739b6b2f0", - "sha256:b43f53f29816ba1db8525f006fa6f49292e9b029554b3eb56a189a70f2a40879", - "sha256:d31402aad60ed889c7e57934a03477b572a03af7794fa8fb1780f21ea8f6551f", - "sha256:de96157ec73458a7f14e3d26f17f8128c959084931e8997b9e655a39c8fde9f9", - "sha256:df6b4dca2e11865e6cfbfb708e800efb18370f5a46fd601d3755bc7f85b3a8a2", - "sha256:ecadccc7ba52193963c0475ac9f6fa28ac01e01349a2ca48509667ef41ffd2cf", - "sha256:fb81c17e0ebe3358486cd8cc3ad78adbae58af12fc2bf2bc0bb84e8090fa5ce8" - ], - "index": "pypi", - "version": "==2.8" - }, - "docker": { - "hashes": [ - "sha256:6e06c5e70ba4fad73e35f00c55a895a448398f3ada7faae072e2bb01348bafc1", - "sha256:8f93775b8bdae3a2df6bc9a5312cce564cade58d6555f2c2570165a1270cd8a7" - ], - "index": "pypi", - "version": "==4.1.0" - }, - "dockerpty": 
{ - "hashes": [ - "sha256:69a9d69d573a0daa31bcd1c0774eeed5c15c295fe719c61aca550ed1393156ce" - ], - "index": "pypi", - "version": "==0.4.1" - }, - "docopt": { - "hashes": [ - "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491" - ], - "index": "pypi", - "version": "==0.6.2" - }, - "enum34": { - "hashes": [ - "sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850", - "sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a", - "sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79", - "sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1" - ], - "index": "pypi", - "version": "==1.1.6" - }, - "execnet": { - "hashes": [ - "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50", - "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547" - ], - "index": "pypi", - "version": "==1.7.1" - }, - "filelock": { - "hashes": [ - "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59", - "sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836" - ], - "index": "pypi", - "version": "==3.0.12" - }, - "funcsigs": { - "hashes": [ - "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca", - "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50" - ], - "index": "pypi", - "version": "==1.0.2" - }, - "idna": { - "hashes": [ - "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", - "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c" - ], - "index": "pypi", - "version": "==2.8" - }, - "importlib-metadata": { - "hashes": [ - "sha256:073a852570f92da5f744a3472af1b61e28e9f78ccf0c9117658dc32b15de7b45", - "sha256:d95141fbfa7ef2ec65cfd945e2af7e5a6ddbd7c8d9a25e66ff3be8e3daf9f60f" - ], - "index": "pypi", - "version": "==1.3.0" - }, - "ipaddress": { - "hashes": [ - "sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc", - 
"sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2" - ], - "index": "pypi", - "version": "==1.0.23" - }, - "jinja2": { - "hashes": [ - "sha256:74320bb91f31270f9551d46522e33af46a80c3d619f4a4bf42b3164d30b5911f", - "sha256:9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de" - ], - "index": "pypi", - "version": "==2.10.3" - }, - "jsonschema": { - "hashes": [ - "sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163", - "sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a" - ], - "index": "pypi", - "version": "==3.2.0" - }, - "markupsafe": { - "hashes": [ - "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", - "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", - "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", - "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", - "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42", - "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", - "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", - "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", - "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", - "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", - "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", - "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b", - "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", - "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15", - "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", - "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", - "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", - 
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", - "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", - "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", - "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", - "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", - "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", - "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", - "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", - "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", - "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", - "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", - "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", - "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", - "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2", - "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", - "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" - ], - "index": "pypi", - "version": "==1.1.1" - }, - "more-itertools": { - "hashes": [ - "sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4", - "sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc", - "sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9" - ], - "index": "pypi", - "version": "==5.0.0" - }, - "packaging": { - "hashes": [ - "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8", - "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181" - ], - "version": "==20.4" - }, - "pathlib2": { - "hashes": [ - "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db", - "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868" - 
], - "index": "pypi", - "version": "==2.3.5" - }, - "pluggy": { - "hashes": [ - "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", - "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" - ], - "index": "pypi", - "version": "==0.13.1" - }, - "py": { - "hashes": [ - "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa", - "sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0" - ], - "index": "pypi", - "version": "==1.8.1" - }, - "pycparser": { - "hashes": [ - "sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3" - ], - "index": "pypi", - "version": "==2.19" - }, - "pyparsing": { - "hashes": [ - "sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f", - "sha256:c342dccb5250c08d45fd6f8b4a559613ca603b57498511740e65cd11a2e7dcec" - ], - "index": "pypi", - "version": "==2.4.6" - }, - "pyrsistent": { - "hashes": [ - "sha256:f3b280d030afb652f79d67c5586157c5c1355c9a58dfc7940566e28d28f3df1b" - ], - "index": "pypi", - "version": "==0.15.6" - }, - "pytest": { - "hashes": [ - "sha256:6192875be8af57b694b7c4904e909680102befcb99e610ef3d9f786952f795aa", - "sha256:f8447ebf8fd3d362868a5d3f43a9df786dfdfe9608843bd9002a2d47a104808f" - ], - "index": "pypi", - "version": "==4.6.8" - }, - "pytest-cov": { - "hashes": [ - "sha256:cc6742d8bac45070217169f5f72ceee1e0e55b0221f54bcf24845972d3a47f2b", - "sha256:cdbdef4f870408ebdbfeb44e63e07eb18bb4619fae852f6e760645fa36172626" - ], - "index": "pypi", - "version": "==2.8.1" - }, - "pytest-forked": { - "hashes": [ - "sha256:1805699ed9c9e60cb7a8179b8d4fa2b8898098e82d229b0825d8095f0f261100", - "sha256:1ae25dba8ee2e56fb47311c9638f9e58552691da87e82d25b0ce0e4bf52b7d87" - ], - "index": "pypi", - "version": "==1.1.3" - }, - "pytest-xdist": { - "hashes": [ - "sha256:0f46020d3d9619e6d17a65b5b989c1ebbb58fc7b1da8fb126d70f4bac4dfeed1", - "sha256:7dc0d027d258cd0defc618fb97055fbd1002735ca7a6d17037018cf870e24011" - ], - "index": "pypi", - 
"version": "==1.31.0" - }, - "pyyaml": { - "hashes": [ - "sha256:0e7f69397d53155e55d10ff68fdfb2cf630a35e6daf65cf0bdeaf04f127c09dc", - "sha256:2e9f0b7c5914367b0916c3c104a024bb68f269a486b9d04a2e8ac6f6597b7803", - "sha256:35ace9b4147848cafac3db142795ee42deebe9d0dad885ce643928e88daebdcc", - "sha256:38a4f0d114101c58c0f3a88aeaa44d63efd588845c5a2df5290b73db8f246d15", - "sha256:483eb6a33b671408c8529106df3707270bfacb2447bf8ad856a4b4f57f6e3075", - "sha256:4b6be5edb9f6bb73680f5bf4ee08ff25416d1400fbd4535fe0069b2994da07cd", - "sha256:7f38e35c00e160db592091751d385cd7b3046d6d51f578b29943225178257b31", - "sha256:8100c896ecb361794d8bfdb9c11fce618c7cf83d624d73d5ab38aef3bc82d43f", - "sha256:c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c", - "sha256:e4c015484ff0ff197564917b4b4246ca03f411b9bd7f16e02a2f586eb48b6d04", - "sha256:ebc4ed52dcc93eeebeae5cf5deb2ae4347b3a81c3fa12b0b8c976544829396a4" - ], - "index": "pypi", - "version": "==5.2" - }, - "requests": { - "hashes": [ - "sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4", - "sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31" - ], - "index": "pypi", - "version": "==2.22.0" - }, - "scandir": { - "hashes": [ - "sha256:2586c94e907d99617887daed6c1d102b5ca28f1085f90446554abf1faf73123e", - "sha256:2ae41f43797ca0c11591c0c35f2f5875fa99f8797cb1a1fd440497ec0ae4b022", - "sha256:2b8e3888b11abb2217a32af0766bc06b65cc4a928d8727828ee68af5a967fa6f", - "sha256:2c712840c2e2ee8dfaf36034080108d30060d759c7b73a01a52251cc8989f11f", - "sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae", - "sha256:67f15b6f83e6507fdc6fca22fedf6ef8b334b399ca27c6b568cbfaa82a364173", - "sha256:7d2d7a06a252764061a020407b997dd036f7bd6a175a5ba2b345f0a357f0b3f4", - "sha256:8c5922863e44ffc00c5c693190648daa6d15e7c1207ed02d6f46a8dcc2869d32", - "sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188", - "sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d", - 
"sha256:cb925555f43060a1745d0a321cca94bcea927c50114b623d73179189a4e100ac" - ], - "index": "pypi", - "version": "==1.10.0" - }, - "six": { - "hashes": [ - "sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd", - "sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66" - ], - "index": "pypi", - "version": "==1.13.0" - }, - "subprocess32": { - "hashes": [ - "sha256:88e37c1aac5388df41cc8a8456bb49ebffd321a3ad4d70358e3518176de3a56b", - "sha256:eb2937c80497978d181efa1b839ec2d9622cf9600a039a79d0e108d1f9aec79d" - ], - "index": "pypi", - "version": "==3.5.4" - }, - "testinfra": { - "hashes": [ - "sha256:780e6c2ab392ea93c26cee1777c968a144c2189a56b3e239a3a66e6d256925b5", - "sha256:c3492b39c8d2c98d8419ce1a91d7fe348213f9b98b91198d2e7e88b3954b050b" - ], - "index": "pypi", - "version": "==3.3.0" - }, - "texttable": { - "hashes": [ - "sha256:7dc282a5b22564fe0fdc1c771382d5dd9a54742047c61558e071c8cd595add86", - "sha256:eff3703781fbc7750125f50e10f001195174f13825a92a45e9403037d539b4f4" - ], - "index": "pypi", - "version": "==1.6.2" - }, - "toml": { - "hashes": [ - "sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c", - "sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e" - ], - "index": "pypi", - "version": "==0.10.0" - }, - "tox": { - "hashes": [ - "sha256:06ba73b149bf838d5cd25dc30c2dd2671ae5b2757cf98e5c41a35fe449f131b3", - "sha256:806d0a9217584558cc93747a945a9d9bff10b141a5287f0c8429a08828a22192" - ], - "index": "pypi", - "version": "==3.14.3" - }, - "urllib3": { - "hashes": [ - "sha256:a8a318824cc77d1fd4b2bec2ded92646630d7fe8619497b142c84a9e6f5a7293", - "sha256:f3c5fd51747d450d4dcf6f923c81f78f811aab8205fda64b0aba34a4e48b0745" - ], - "index": "pypi", - "version": "==1.25.7" - }, - "virtualenv": { - "hashes": [ - "sha256:0d62c70883c0342d59c11d0ddac0d954d0431321a41ab20851facf2b222598f3", - "sha256:55059a7a676e4e19498f1aad09b8313a38fcc0cdbe4fdddc0e9b06946d21b4bb" - ], - "index": "pypi", - "version": "==16.7.9" 
- }, - "wcwidth": { - "hashes": [ - "sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e", - "sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c" - ], - "index": "pypi", - "version": "==0.1.7" - }, - "websocket-client": { - "hashes": [ - "sha256:0fc45c961324d79c781bab301359d5a1b00b13ad1b10415a4780229ef71a5549", - "sha256:d735b91d6d1692a6a181f2a8c9e0238e5f6373356f561bb9dc4c7af36f452010" - ], - "index": "pypi", - "version": "==0.57.0" - }, - "zipp": { - "hashes": [ - "sha256:3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e", - "sha256:f06903e9f1f43b12d371004b4ac7b06ab39a44adc747266928ae6debfa7b3335" - ], - "index": "pypi", - "version": "==0.6.0" - } - }, - "develop": {} -} diff --git a/README.md b/README.md index 9a05db687..88752bce1 100644 --- a/README.md +++ b/README.md @@ -3,249 +3,134 @@

Pi-hole

- -## Quick Start -[Docker-compose](https://docs.docker.com/compose/install/) example: +![build status]( https://github.com/rndnoise/docker-pi-hole/workflows/build/badge.svg) +![release version]( https://img.shields.io/github/v/release/rndnoise/docker-pihole.svg?maxAge=604800) +![release date]( https://img.shields.io/github/release-date/rndnoise/docker-pihole.svg?maxAge=604800) +![unreleased commits](https://img.shields.io/github/commits-since/rndnoise/docker-pihole/latest.svg?maxAge=604800) + +## Overview + +A [Docker project](https://www.docker.com/what-docker) to make a lightweight x86 or ARM container with [Pi-hole](https://pi-hole.net/) functionality. + +## Quick start + +Here's an example `docker-compose.yml`: ```yaml version: "3" -# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/ services: pihole: container_name: pihole - image: pihole/pihole:latest + image: example/pihole:latest ports: + - "80:80/tcp" - "53:53/tcp" - "53:53/udp" - - "67:67/udp" - - "80:80/tcp" - - "443:443/tcp" + restart: unless-stopped environment: - TZ: 'America/Chicago' - # WEBPASSWORD: 'set a secure password here or it will be random' - # Volumes store your data between container upgrades + TZ: America/Chicago + PUID: 999 + PGID: 999 + PIHOLE_IPV4_ADDRESS: "0.0.0.0" + PIHOLE_IPV6_ADDRESS: "::" + PIHOLE_WEB_PASSWORD: "the password is password" + PIHOLE_WEB_HOSTNAME: "pi.hole" + PIHOLE_DNS_UPSTREAM_1: 1.1.1.1 + PIHOLE_DNS_UPSTREAM_2: 1.0.0.1 + PIHOLE_DNS_UPSTREAM_3: 8.8.8.8 + PIHOLE_DNS_UPSTREAM_4: 9.9.9.9 volumes: - - './etc-pihole/:/etc/pihole/' - - './etc-dnsmasq.d/:/etc/dnsmasq.d/' - # Recommended but not required (DHCP needs NET_ADMIN) - # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities - cap_add: - - NET_ADMIN - restart: unless-stopped -``` - -[Here is an equivalent docker run script](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh). 
- -## Upgrade Notices: - -### Docker Pi-Hole v4.2.2 - -- ServerIP no longer a required enviroment variable **unless you run network 'host' mode**! Feel free to remove it unless you need it to customize lighttpd -- --cap-add NET_ADMIN no longer required unless using DHCP, leaving in examples for consistency - -### Docker Pi-Hole v4.1.1+ - -Starting with the v4.1.1 release your Pi-hole container may encounter issues starting the DNS service unless ran with the following setting: - -- `--dns=127.0.0.1 --dns=1.1.1.1` The second server can be any DNS IP of your choosing, but the **first dns must be 127.0.0.1** - - A WARNING stating "Misconfigured DNS in /etc/resolv.conf" may show in docker logs without this. -- 4.1 required --cap-add NET_ADMIN until 4.2.1-1 - -These are the raw [docker run cli](https://docs.docker.com/engine/reference/commandline/cli/) versions of the commands. We provide no official support for docker GUIs but the community forums may be able to help if you do not see a place for these settings. Remember, always consult your manual too! - -## Overview - -#### Renamed from `diginc/pi-hole` to `pihole/pihole` - -A [Docker](https://www.docker.com/what-docker) project to make a lightweight x86 and ARM container with [Pi-hole](https://pi-hole.net) functionality. - -1) Install docker for your [x86-64 system](https://www.docker.com/community-edition) or [ARMv7 system](https://www.raspberrypi.org/blog/docker-comes-to-raspberry-pi/) using those links. [Docker-compose](https://docs.docker.com/compose/install/) is also recommended. -2) Use the above quick start example, customize if desired. -3) Enjoy! 
- -[![Build Status](https://github.com/pi-hole/docker-pi-hole/workflows/Test%20&%20Build/badge.svg)](https://github.com/pi-hole/docker-pi-hole/actions?query=workflow%3A%22Test+%26+Build%22) [![Docker Stars](https://img.shields.io/docker/stars/pihole/pihole.svg?maxAge=604800)](https://store.docker.com/community/images/pihole/pihole) [![Docker Pulls](https://img.shields.io/docker/pulls/pihole/pihole.svg?maxAge=604800)](https://store.docker.com/community/images/pihole/pihole) - -## Running Pi-hole Docker - -This container uses 2 popular ports, port 53 and port 80, so **may conflict with existing applications ports**. If you have no other services or docker containers using port 53/80 (if you do, keep reading below for a reverse proxy example), the minimum arguments required to run this container are in the script [docker_run.sh](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh) - -If you're using a Red Hat based distribution with an SELinux Enforcing policy add `:z` to line with volumes like so: - -``` - -v "$(pwd)/etc-pihole/:/etc/pihole/:z" \ - -v "$(pwd)/etc-dnsmasq.d/:/etc/dnsmasq.d/:z" \ + - ./pihole/var-log:/var/log + - ./pihole/etc-pihole:/etc/pihole + - ./pihole/etc-dnsmasq.d:/etc/dnsmasq.d ``` -Volumes are recommended for persisting data across container re-creations for updating images. The IP lookup variables may not work for everyone, please review their values and hard code IP and IPv6 if necessary. - -You can customize where to store persistent data by setting the `PIHOLE_BASE` environment variable when invoking `docker_run.sh` (e.g. `PIHOLE_BASE=/opt/pihole-storage ./docker_run.sh`). If `PIHOLE_BASE` is not set, files are stored in your current directory when you invoke the script. - -Port 443 is to provide a sinkhole for ads that use SSL. If only port 80 is used, then blocked HTTPS queries will fail to connect to port 443 and may cause long loading times. Rejecting 443 on your firewall can also serve this same purpose. 
Ubuntu firewall example: `sudo ufw reject https` - -**Automatic Ad List Updates** - since the 3.0+ release, `cron` is baked into the container and will grab the newest versions of your lists and flush your logs. **Set your TZ** environment variable to make sure the midnight log rotation syncs up with your timezone's midnight. - -## Running DHCP from Docker Pi-Hole - -There are multiple different ways to run DHCP from within your Docker Pi-hole container but it is slightly more advanced and one size does not fit all. DHCP and Docker's multiple network modes are covered in detail on our docs site: [Docker DHCP and Network Modes](https://docs.pi-hole.net/docker/DHCP/) - -## Environment Variables - -There are other environment variables if you want to customize various things inside the docker container: - -| Docker Environment Var. | Description | -| ----------------------- | ----------- | -| `ADMIN_EMAIL: `
*Optional Default: ''* | Set an administrative contact address for the Block Page -| `TZ: `
**Recommended** *Default: UTC* | Set your [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to make sure logs rotate at local midnight instead of at UTC midnight. -| `WEBPASSWORD: `
**Recommended** *Default: random* | http://pi.hole/admin password. Run `docker logs pihole \| grep random` to find your random pass. -| `DNS1: `
*Optional* *Default: 8.8.8.8* | Primary upstream DNS provider, default is google DNS -| `DNS2: `
*Optional* *Default: 8.8.4.4* | Secondary upstream DNS provider, default is google DNS, `no` if only one DNS should used -| `DNSSEC: <"true"\|"false">`
*Optional* *Default: "false"* | Enable DNSSEC support -| `DNS_BOGUS_PRIV: <"true"\|"false">`
*Optional* *Default: "true"* | Enable forwarding of reverse lookups for private ranges -| `DNS_FQDN_REQUIRED: <"true"\|"false">`
*Optional* *Default: true* | Never forward non-FQDNs -| `CONDITIONAL_FORWARDING: <"true"\|"false">`
*Optional* *Default: "false"* | Enable DNS conditional forwarding for device name resolution -| `CONDITIONAL_FORWARDING_IP: `
*Optional* | If conditional forwarding is enabled, set the IP of the local network router -| `CONDITIONAL_FORWARDING_DOMAIN: `
*Optional* | If conditional forwarding is enabled, set the domain of the local network router -| `CONDITIONAL_FORWARDING_REVERSE: `
*Optional* | If conditional forwarding is enabled, set the reverse DNS of the local network router (e.g. `0.168.192.in-addr.arpa`) -| `ServerIP: `
**Recommended** | **--net=host mode requires** Set to your server's LAN IP, used by web block modes and lighttpd bind address -| `ServerIPv6: `
*Required if using IPv6* | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully -| `VIRTUAL_HOST: `
*Optional* *Default: $ServerIP* | What your web server 'virtual host' is, accessing admin through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address -| `IPv6: <"true"\|"false">`
*Optional* *Default: "true"* | For unraid compatibility, strips out all the IPv6 configuration from DNS/Web services when false. -| `INTERFACE: `
*Advanced/Optional* | The default works fine with our basic example docker run commands. If you're trying to use DHCP with `--net host` mode then you may have to customize this or DNSMASQ_LISTENING. -| `DNSMASQ_LISTENING: `
*Advanced/Optional* | `local` listens on all local subnets, `all` permits listening on internet origin subnets in addition to local. -| `WEB_PORT: `
*Advanced/Optional* | **This will break the 'webpage blocked' functionality of Pi-hole** however it may help advanced setups like those running synology or `--net=host` docker argument. This guide explains how to restore webpage blocked functionality using a linux router DNAT rule: [Alternative Synology installation method](https://discourse.pi-hole.net/t/alternative-synology-installation-method/5454?u=diginc) -| `DNSMASQ_USER: `
*Experimental Default: root* | Allows running FTLDNS as non-root. -| `TEMPERATUREUNIT`:
*Optional Default: c* | Set preferred temperature unit to `c`: Celsius, `k`: Kelvin, or `f` Fahrenheit units. -| `WEBUIBOXEDLAYOUT: `
*Optional Default: boxed* | Use boxed layout (helpful when working on large screens) - -To use these env vars in docker run format style them like: `-e DNS1=1.1.1.1` - -Here is a rundown of other arguments for your docker-compose / docker run. - -| Docker Arguments | Description | -| ---------------- | ----------- | -| `-p :` **Recommended** | Ports to expose (53, 80, 67, 443), the bare minimum ports required for Pi-holes HTTP and DNS services -| `--restart=unless-stopped`
**Recommended** | Automatically (re)start your Pi-hole on boot or in the event of a crash -| `-v $(pwd)/etc-pihole:/etc/pihole`
**Recommended** | Volumes for your Pi-hole configs help persist changes across docker image updates -| `-v $(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d`
**Recommended** | Volumes for your dnsmasq configs help persist changes across docker image updates -| `--net=host`
*Optional* | Alternative to `-p :` arguments (Cannot be used at same time as -p) if you don't run any other web application. DHCP runs best with --net=host, otherwise your router must support dhcp-relay settings. -| `--cap-add=NET_ADMIN`
*Recommended* | Commonly added capability for DHCP, see [Note on Capabilities](#note-on-capabilities) below for other capabilities. -| `--dns=127.0.0.1`
*Recommended* | Sets your container's resolve settings to localhost so it can resolve DHCP hostnames from Pi-hole's DNSMasq, also fixes common resolution errors on container restart. -| `--dns=1.1.1.1`
*Optional* | Sets a backup server of your choosing in case DNSMasq has problems starting -| `--env-file .env`
*Optional* | File to store environment variables for docker replacing `-e key=value` settings. Here for convenience - -## Tips and Tricks - -* A good way to test things are working right is by loading this page: [http://pi.hole/admin/](http://pi.hole/admin/) -* [How do I set or reset the Web interface Password?](https://discourse.pi-hole.net/t/how-do-i-set-or-reset-the-web-interface-password/1328) - * `docker exec -it pihole_container_name pihole -a -p` - then enter your password into the prompt -* Port conflicts? Stop your server's existing DNS / Web services. - * Don't forget to stop your services from auto-starting again after you reboot - * Ubuntu users see below for more detailed information -* Port 80 is highly recommended because if you have another site/service using port 80 by default then the ads may not transform into blank ads correctly. To make sure docker-pi-hole plays nicely with an existing webserver you run you'll probably need a reverse proxy webserver config if you don't have one already. Pi-hole must be the default web app on the proxy e.g. if you go to your host by IP instead of domain then Pi-hole is served out instead of any other sites hosted by the proxy. This is the '[default_server](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen)' in nginx or ['_default_' virtual host](https://httpd.apache.org/docs/2.4/vhosts/examples.html#default) in Apache and is taken advantage of so any undefined ad domain can be directed to your webserver and get a 'blocked' response instead of ads. - * You can still map other ports to Pi-hole port 80 using docker's port forwarding like this `-p 8080:80`, but again the ads won't render properly. Changing the inner port 80 shouldn't be required unless you run docker host networking mode. 
- * [Here is an example of running with jwilder/proxy](https://github.com/pi-hole/docker-pi-hole/blob/master/docker-compose-jwilder-proxy.yml) (an nginx auto-configuring docker reverse proxy for docker) on my port 80 with Pi-hole on another port. Pi-hole needs to be `DEFAULT_HOST` env in jwilder/proxy and you need to set the matching `VIRTUAL_HOST` for the Pi-hole's container. Please read jwilder/proxy readme for more info if you have trouble. - -### Installing on Ubuntu -Modern releases of Ubuntu (17.10+) include [`systemd-resolved`](http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html) which is configured by default to implement a caching DNS stub resolver. This will prevent pi-hole from listening on port 53. -The stub resolver should be disabled with: `sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf` - -This will not change the nameserver settings, which point to the stub resolver thus preventing DNS resolution. Change the `/etc/resolv.conf` symlink to point to `/run/systemd/resolve/resolv.conf`, which is automatically updated to follow the system's [`netplan`](https://netplan.io/): -`sudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'` -After making these changes, you should restart systemd-resolved using `systemctl restart systemd-resolved` - -Once pi-hole is installed, you'll want to configure your clients to use it ([see here](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245)). If you used the symlink above, your docker host will either use whatever is served by DHCP, or whatever static setting you've configured. If you want to explicitly set your docker host's nameservers you can edit the netplan(s) found at `/etc/netplan`, then run `sudo netplan apply`. 
-Example netplan: -```yaml -network: - ethernets: - ens160: - dhcp4: true - dhcp4-overrides: - use-dns: false - nameservers: - addresses: [127.0.0.1] - version: 2 +Here's an equivalent `docker run` command: + +```sh +docker run \ + --detach \ + --name pihole \ + --restart=unless-stopped \ + --publish 53:53/udp \ + --publish 53:53/tcp \ + --publish 80:80/tcp \ + --env "TZ=America/Chicago" \ + --env "PUID=999" \ + --env "PGID=999" \ + --env "PIHOLE_IPV4_ADDRESS=0.0.0.0" \ + --env "PIHOLE_IPV6_ADDRESS=::" \ + --env "PIHOLE_WEB_PASSWORD=the password is password" \ + --env "PIHOLE_WEB_HOSTNAME=pi.hole" \ + --env "PIHOLE_DNS_UPSTREAM_1=1.1.1.1" \ + --env "PIHOLE_DNS_UPSTREAM_2=1.0.0.1" \ + --env "PIHOLE_DNS_UPSTREAM_3=8.8.8.8" \ + --env "PIHOLE_DNS_UPSTREAM_4=9.9.9.9" \ + --volume "$(pwd)/pihole/var-log:/var/log" \ + --volume "$(pwd)/pihole/etc-pihole:/etc/pihole" \ + --volume "$(pwd)/pihole/etc-dnsmasq.d:/etc/dnsmasq.d" + example/pihole:latest ``` -Note that it is also possible to disable `systemd-resolved` entirely. However, this can cause problems with name resolution in vpns ([see bug report](https://bugs.launchpad.net/network-manager/+bug/1624317)). It also disables the functionality of netplan since systemd-resolved is used as the default renderer ([see `man netplan`](http://manpages.ubuntu.com/manpages/bionic/man5/netplan.5.html#description)). If you choose to disable the service, you will need to manually set the nameservers, for example by creating a new `/etc/resolv.conf`. - -Users of older Ubuntu releases (circa 17.04) will need to disable dnsmasq. 
+## Environment variables + +| Environment variable name | Default | Values | Description | +| ----------------------------- | ------- | ----------- | ----------- | +| `PIHOLE_ADMIN_EMAIL` | | | Set an administrative contact address on the block page +| `PIHOLE_DNS_BLOCKING_MODE` | `NULL` | [See docs](https://docs.pi-hole.net/ftldns/blockingmode/) | Method used to block queries +| `PIHOLE_DNS_BOGUS_PRIV` | `true` | `true`, `false` | Forward reverse lookups on private ranges to upstream servers +| `PIHOLE_DNS_CNAME_INSPECT` | `true` | `true`, `false` | Enable or disable deep CNAME inspection. See [PR #663](https://github.com/pi-hole/FTL/pull/663) +| `PIHOLE_DNS_DNSSEC` | `false` | `true`, `false` | Enable or disable DNSSEC +| `PIHOLE_DNS_FQDN_REQUIRED` | `true` | `true`, `false` | Forward queries on non-FQDNs to upstream servers +| `PIHOLE_DNS_IGNORE_LOCALHOST` | `false` | `true`, `false` | Ignore queries originating from the local machine +| `PIHOLE_DNS_LAN_DOMAIN` | | | When LAN forwarding is enabled, forward queries for this domain to upstream LAN DNS server +| `PIHOLE_DNS_LAN_ENABLE` | `false` | `true`, `false` | Enable or disable forwarding queries for LAN to a separate DNS server +| `PIHOLE_DNS_LAN_NETWORK` | | IPv4/6 CIDR | When LAN forwarding is enabled, forward reverse queries for this network range to upstream LAN DNS server +| `PIHOLE_DNS_LAN_UPSTREAM` | | | When LAN forwarding is enabled, use this DNS server to resolve LAN queries +| `PIHOLE_DNS_PRIVACY_LVL` | `0` | [See docs](https://docs.pi-hole.net/ftldns/privacylevels/) | Specifies level of detail given in Pi-hole statistics. 
+| `PIHOLE_DNS_UPSTREAM_1`* | | IPv4/6 addr | Primary upstream DNS server +| `PIHOLE_DNS_UPSTREAM_2` | | IPv4/6 addr | Secondary upstream DNS server +| `PIHOLE_DNS_UPSTREAM_3` | | IPv4/6 addr | Tertiary upstream DNS server +| `PIHOLE_DNS_UPSTREAM_4` | | IPv4/6 addr | Quaternary upstream DNS server +| `PIHOLE_DNS_USER` | `pihole` | | User which runs `pihole-FTL` (can be `root`) +| `PIHOLE_IPV4_ADDRESS` | `0.0.0.0` | `auto`, IPv4 addr | Your Pi-hole's address, used to redirect/block requests +| `PIHOLE_IPV6_ADDRESS` | `::` | `auto`, IPv6 addr | Your Pi-hole's address, used to redirect/block requests +| `PIHOLE_LISTEN` | `all` | `all`, `iface`, `local` | Listen to all `local` subnets, `all` networks (including the Internet), or a specified `iface` +| `PIHOLE_INTERFACE` | | | When `PIHOLE_LISTEN` is `iface`, specifies the interface used to listen for DNS queries and HTTP requests +| `PIHOLE_TEMPERATURE_UNIT` | `F` | `F`, `C`, `K` | +| `PIHOLE_WEB_HOSTNAME` | `hostname -f` | | The hostname used to access the Pi-hole admin page +| `PIHOLE_WEB_PASSWORD` | randomized | | The password required to access the Pi-hole admin page. See `pihole logs pihole` to find the randomized password +| `PIHOLE_WEB_PASSWORD_FILE` | | | Filename containing password, will override `PIHOLE_WEB_PASSWORD` if it's set. +| `PIHOLE_WEB_PORT` | `80` | | Which port the admin page should listen on +| `PIHOLE_WEB_UI` | `boxed` | `boxed`, `traditional` | Which layout is used for the admin page + +Required environment variables (which do not have default values) are indicated by `*` + +## Tips and tricks + +... ## Docker tags and versioning -The primary docker tags / versions are explained in the following table. 
[Click here to see the full list of tags](https://store.docker.com/community/images/pihole/pihole/tags), I also try to tag with the specific version of Pi-hole Core for version archival purposes, the web version that comes with the core releases should be in the [GitHub Release notes](https://github.com/pi-hole/docker-pi-hole/releases). - -| tag | architecture | description | Dockerfile | -| --- | ------------ | ----------- | ---------- | -| `latest` | auto detect | x86, arm, or arm64 container, docker auto detects your architecture. | [Dockerfile](https://github.com/pi-hole/docker-pi-hole/blob/master/Dockerfile) | -| `v5.0` | auto detect | Versioned tags, if you want to pin against a specific Pi-hole version, use one of these | | -| `v5.0-stretch` | auto detect | Versioned tags, if you want to pin against a specific Pi-hole and Debian version, use one of these | | -| `v5.0--stretch` | based on tag | Specific architectures and Debian version tags | | -| `dev` | auto detect | like latest tag, but for the development branch (pushed occasionally) | | - -### `pihole/pihole:latest` [![](https://images.microbadger.com/badges/image/pihole/pihole:latest.svg)](https://microbadger.com/images/pihole/pihole "Get your own image badge on microbadger.com") [![](https://images.microbadger.com/badges/version/pihole/pihole:latest.svg)](https://microbadger.com/images/pihole/pihole "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/version/pihole/pihole:latest.svg)](https://microbadger.com/images/pihole/pihole "Get your own version badge on microbadger.com") - -This version of the docker aims to be as close to a standard Pi-hole installation by using the recommended base OS and the exact configs and scripts (minimally modified to get them working). This enables fast updating when an update comes from Pi-hole. 
- -https://hub.docker.com/r/pihole/pihole/tags/ - -## Upgrading, Persistence, and Customizations - -The standard Pi-hole customization abilities apply to this docker, but with docker twists such as using docker volume mounts to map host stored file configurations over the container defaults. Volumes are also important to persist the configuration in case you have removed the Pi-hole container which is a typical docker upgrade pattern. - -### Upgrading / Reconfiguring - -Do not attempt to upgrade (`pihole -up`) or reconfigure (`pihole -r`). New images will be released for upgrades, upgrading by replacing your old container with a fresh upgraded image is the 'docker way'. Long-living docker containers are not the docker way since they aim to be portable and reproducible, why not re-create them often! Just to prove you can. +... -0. Read the release notes for both this Docker release and the Pi-hole release - * This will help you avoid common problems due to any known issues with upgrading or newly required arguments or variables - * We will try to put common break/fixes at the top of this readme too -1. Download the latest version of the image: `docker pull pihole/pihole` -2. Throw away your container: `docker rm -f pihole` - * **Warning** When removing your pihole container you may be stuck without DNS until step 3; **docker pull** before **docker rm -f** to avoid DNS inturruption **OR** always have a fallback DNS server configured in DHCP to avoid this problem altogether. - * If you care about your data (logs/customizations), make sure you have it volume-mapped or it will be deleted in this step. -3. Start your container with the newer base image: `docker run pihole/pihole` (`` being your preferred run volumes and env vars) +## Upgrading -Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to known it works. 
No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reducing complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix server](https://www.google.com/?q=phoenix+servers) principles for your containers. +... -To reconfigure Pi-hole you'll either need to use an existing container environment variables or if there is no a variable for what you need, use the web UI or CLI commands. +## Running DHCP from Docker Pi-hole -### Pi-hole features +This docker image doesn't support configuring FTLDNS as a DHCP server. Instead, you can either use `PIHOLE_DNS_LAN_...` configuration to forward LAN traffic to your DHCP server/router, or write a script to export data from your DHCP server to a host file and drop a config file in `/etc/dnsmasq.d/` to tell FTLDNS about that file. Changes to that file will automatically be detected without restarting FTLDNS. -Here are some relevant wiki pages from [Pi-hole's documentation](https://github.com/pi-hole/pi-hole/blob/master/README.md#get-help-or-connect-with-us-on-the-web). The web interface or command line tools can be used to implement changes to pihole. - -We install all pihole utilities so the the built in [pihole commands](https://discourse.pi-hole.net/t/the-pihole-command-with-examples/738) will work via `docker exec ` like so: - -* `docker exec pihole_container_name pihole updateGravity` -* `docker exec pihole_container_name pihole -w spclient.wg.spotify.com` -* `docker exec pihole_container_name pihole -wild example.com` - -### Customizations - -The webserver and DNS service inside the container can be customized if necessary. Any configuration files you volume mount into `/etc/dnsmasq.d/` will be loaded by dnsmasq when the container starts or restarts or if you need to modify the Pi-hole config it is located at `/etc/dnsmasq.d/01-pihole.conf`. 
The docker start scripts runs a config test prior to starting so it will tell you about any errors in the docker log. - -Similarly for the webserver you can customize configs in /etc/lighttpd - -### Systemd init script - -As long as your docker system service auto starts on boot and you run your container with `--restart=unless-stopped` your container should always start on boot and restart on crashes. If you prefer to have your docker container run as a systemd service instead, add the file [pihole.service](https://raw.githubusercontent.com/pi-hole/docker-pi-hole/master/pihole.service) to "/etc/systemd/system"; customize whatever your container name is and remove `--restart=unless-stopped` from your docker run. Then after you have initially created the docker container using the docker run command above, you can control it with "systemctl start pihole" or "systemctl stop pihole" (instead of `docker start`/`docker stop`). You can also enable it to auto-start on boot with "systemctl enable pihole" (as opposed to `--restart=unless-stopped` and making sure docker service auto-starts on boot). - -NOTE: After initial run you may need to manually stop the docker container with "docker stop pihole" before the systemctl can start controlling the container. 
- -## Note on Capabilities - -DNSMasq / [FTLDNS](https://docs.pi-hole.net/ftldns/in-depth/#linux-capabilities) expects to have the following capabilities available: -- `CAP_NET_BIND_SERVICE`: Allows FTLDNS binding to TCP/UDP sockets below 1024 (specifically DNS service on port 53) -- `CAP_NET_RAW`: use raw and packet sockets (needed for handling DHCPv6 requests, and verifying that an IP is not in use before leasing it) -- `CAP_NET_ADMIN`: modify routing tables and other network-related operations (in particular inserting an entry in the neighbor table to answer DHCP requests using unicast packets) - -This image automatically grants those capabilities, if available, to the FTLDNS process, even when run as non-root.\ -By default, docker does not include the `NET_ADMIN` capability for non-privileged containers, and it is recommended to explicitly add it to the container using `--cap-add=NET_ADMIN`.\ -However, if DHCP and IPv6 Router Advertisements are not in use, it should be safe to skip it. For the most paranoid, it should even be possible to explicitly drop the `NET_RAW` capability to prevent FTLDNS from automatically gaining it. +``` +local=/lan/ # answer queries from this domain using host files +hostsdir=/etc/dnsmasq.d/lan # files in this directory will be used as host files +``` +## Upgrade notices -# User Feedback +### Docker Pi-hole v5.1.2 -Please report issues on the [GitHub project](https://github.com/pi-hole/docker-pi-hole) when you suspect something docker related. Pi-hole or general docker questions are best answered on our [user forums](https://github.com/pi-hole/pi-hole/blob/master/README.md#get-help-or-connect-with-us-on-the-web). Ping me (@diginc) on the forums if it's a docker container and you're not sure if it's docker related. +This version was forked from [pi-hole/docker-pi-hole](https://github.com/pi-hole/docker-pi-hole/tree/v5.1.2) and then largely rewritten. 
The build has been rewritten using a single `Dockerfile` and one installation script, `Dockerfile.sh`. Multiarch images are built using `docker buildx` via GitHub actions and automatically published to Docker Hub. The startup shell scripts that configured Pi-hole have been replaced with a single Perl script. diff --git a/TESTING.md b/TESTING.md deleted file mode 100644 index 75e8090bf..000000000 --- a/TESTING.md +++ /dev/null @@ -1,22 +0,0 @@ -# Prerequisites - -Make sure you have bash, docker. Python and some test hacks are crammed into the `Dockerfile_build` file for now. Revisions in the future may re-enable running python on your host (not just in docker). - -# Running tests locally - -`ARCH=amd64 ./gh-actions-test.sh` - -Should result in : - -- An image named `pihole:amd64` being build -- Tests being ran to confirm the image doesnt have any regressions - -# Local image names - -Docker images built by `Dockerfile.py` are named the same but stripped of the `pihole/` docker repository namespace. - -e.g. `pi-hole:debian_amd64` or `pi-hole-multiarch:debian_arm64` - -You can run the multiarch images on an amd64 development system if you [enable binfmt-support as described in the multiarch image docs](https://hub.docker.com/r/multiarch/multiarch/debian-debootstrap/) - -`docker run --rm --privileged multiarch/qemu-user-static:register --reset` diff --git a/VERSION b/VERSION deleted file mode 100644 index 96f312354..000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -v5.1.2 diff --git a/autotest b/autotest deleted file mode 100755 index 6a02656dc..000000000 --- a/autotest +++ /dev/null @@ -1 +0,0 @@ -py.test -f ./test -v $@ diff --git a/bash_functions.sh b/bash_functions.sh deleted file mode 100644 index 025f84534..000000000 --- a/bash_functions.sh +++ /dev/null @@ -1,374 +0,0 @@ -#!/bin/bash -# Some of the bash_functions use variables these core pi-hole/web scripts -. 
/opt/pihole/webpage.sh - -fix_capabilities() { - setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN+ei $(which pihole-FTL) || ret=$? - - if [[ $ret -ne 0 && "${DNSMASQ_USER:-root}" != "root" ]]; then - echo "ERROR: Failed to set capabilities for pihole-FTL. Cannot run as non-root." - exit 1 - fi -} - -prepare_configs() { - # Done in /start.sh, don't do twice - PH_TEST=true . $PIHOLE_INSTALL - distro_check - installConfigs - touch "$setupVars" - set +e - mkdir -p /var/run/pihole /var/log/pihole - # Re-apply perms from basic-install over any volume mounts that may be present (or not) - # Also similar to preflights for FTL https://github.com/pi-hole/pi-hole/blob/master/advanced/Templates/pihole-FTL.service - chown pihole:root /etc/lighttpd - chown pihole:pihole "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" "/var/log/pihole" "${regexFile}" - chmod 644 "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" - # not sure why pihole:pihole user/group write perms are not enough for web to write...dirty fix: - chmod 777 "${regexFile}" - touch /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log - chown pihole:pihole /var/run/pihole /var/log/pihole - test -f /var/run/pihole/FTL.sock && rm /var/run/pihole/FTL.sock - chown pihole:pihole /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /etc/pihole /etc/pihole/dhcp.leases /var/log/pihole.log - chmod 0644 /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log - set -e - # Update version numbers - pihole updatechecker - # Re-write all of the setupVars to ensure required ones are present (like QUERY_LOGGING) - - # If the setup variable file exists, - if [[ -e "${setupVars}" ]]; then - cp -f "${setupVars}" "${setupVars}.update.bak" - fi -} - -validate_env() { - # Optional ServerIP is a valid IP - # nc won't throw any text based errors when it times out connecting to a valid IP, otherwise it complains about the DNS name being garbage - # if nc doesn't behave as we expect on a 
valid IP the routing table should be able to look it up and return a 0 retcode - if [[ "$(nc -4 -w1 -z "$ServerIP" 53 2>&1)" != "" ]] && ! ip route get "$ServerIP" > /dev/null ; then - echo "ERROR: ServerIP Environment variable ($ServerIP) doesn't appear to be a valid IPv4 address" - exit 1 - fi - - # Optional IPv6 is a valid address - if [[ -n "$ServerIPv6" ]] ; then - if [[ "$ServerIPv6" == 'kernel' ]] ; then - echo "ERROR: You passed in IPv6 with a value of 'kernel', this maybe beacuse you do not have IPv6 enabled on your network" - unset ServerIPv6 - exit 1 - fi - if [[ "$(nc -6 -w1 -z "$ServerIPv6" 53 2>&1)" != "" ]] && ! ip route get "$ServerIPv6" > /dev/null ; then - echo "ERROR: ServerIPv6 Environment variable ($ServerIPv6) doesn't appear to be a valid IPv6 address" - echo " TIP: If your server is not IPv6 enabled just remove '-e ServerIPv6' from your docker container" - exit 1 - fi - fi; -} - -setup_dnsmasq_dns() { - . /opt/pihole/webpage.sh - local DNS1="${1:-8.8.8.8}" - local DNS2="${2:-8.8.4.4}" - local dnsType='default' - if [ "$DNS1" != '8.8.8.8' ] || [ "$DNS2" != '8.8.4.4' ] ; then - dnsType='custom' - fi; - - # TODO With the addition of this to /start.sh this needs a refactor - if [ ! 
-f /.piholeFirstBoot ] ; then - local setupDNS1="$(grep 'PIHOLE_DNS_1' ${setupVars})" - local setupDNS2="$(grep 'PIHOLE_DNS_2' ${setupVars})" - setupDNS1="${setupDNS1/PIHOLE_DNS_1=/}" - setupDNS2="${setupDNS2/PIHOLE_DNS_2=/}" - if [[ -n "$DNS1" && -n "$setupDNS1" ]] || \ - [[ -n "$DNS2" && -n "$setupDNS2" ]] ; then - echo "Docker DNS variables not used" - fi - echo "Existing DNS servers used (${setupDNS1:-unset} & ${setupDNS2:-unset})" - return - fi - - echo "Using $dnsType DNS servers: $DNS1 & $DNS2" - if [[ -n "$DNS1" && -z "$setupDNS1" ]] ; then - change_setting "PIHOLE_DNS_1" "${DNS1}" - fi - if [[ -n "$DNS2" && -z "$setupDNS2" ]] ; then - if [[ "$DNS2" == "no" ]] ; then - delete_setting "PIHOLE_DNS_2" - unset PIHOLE_DNS_2 - else - change_setting "PIHOLE_DNS_2" "${DNS2}" - fi - fi -} - -setup_dnsmasq_interface() { - local interface="${1:-eth0}" - local interfaceType='default' - if [ "$interface" != 'eth0' ] ; then - interfaceType='custom' - fi; - echo "DNSMasq binding to $interfaceType interface: $interface" - [ -n "$interface" ] && change_setting "PIHOLE_INTERFACE" "${interface}" -} - -setup_dnsmasq_listening_behaviour() { - local dnsmasq_listening_behaviour="${1}" - - if [ -n "$dnsmasq_listening_behaviour" ]; then - change_setting "DNSMASQ_LISTENING" "${dnsmasq_listening_behaviour}" - fi; -} - -setup_dnsmasq_config_if_missing() { - # When fresh empty directory volumes are used we miss this file - if [ ! 
-f /etc/dnsmasq.d/01-pihole.conf ] ; then - cp /etc/.pihole/advanced/01-pihole.conf /etc/dnsmasq.d/ - fi; -} - -setup_dnsmasq() { - local dns1="$1" - local dns2="$2" - local interface="$3" - local dnsmasq_listening_behaviour="$4" - # Coordinates - setup_dnsmasq_config_if_missing - setup_dnsmasq_dns "$dns1" "$dns2" - setup_dnsmasq_interface "$interface" - setup_dnsmasq_listening_behaviour "$dnsmasq_listening_behaviour" - setup_dnsmasq_user "${DNSMASQ_USER}" - ProcessDNSSettings -} - -setup_dnsmasq_user() { - local DNSMASQ_USER="${1}" - - # Run DNSMASQ as root user to avoid SHM permission issues - if grep -r -q '^\s*user=' /etc/dnsmasq.* ; then - # Change user that had been set previously to root - for f in $(grep -r -l '^\s*user=' /etc/dnsmasq.*); do - sed -i "/^\s*user=/ c\user=${DNSMASQ_USER}" "${f}" - done - else - echo -e "\nuser=${DNSMASQ_USER}" >> /etc/dnsmasq.conf - fi -} - -setup_dnsmasq_hostnames() { - # largely borrowed from automated install/basic-install.sh - local IPV4_ADDRESS="${1}" - local IPV6_ADDRESS="${2}" - local hostname="${3}" - local dnsmasq_pihole_01_location="/etc/dnsmasq.d/01-pihole.conf" - - if [ -z "$hostname" ]; then - if [[ -f /etc/hostname ]]; then - hostname=$( \"${VIRTUAL_HOST}\"," - local serverip_line="\t\t\t\"ServerIP\" => \"${ServerIP}\"," - local php_error_line="\t\t\t\"PHP_ERROR_LOG\" => \"${PHP_ERROR_LOG}\"," - - # idempotent line additions - grep -qP "$vhost_line" "$PHP_ENV_CONFIG" || \ - sed -i "/bin-environment/ a\\${vhost_line}" "$PHP_ENV_CONFIG" - grep -qP "$serverip_line" "$PHP_ENV_CONFIG" || \ - sed -i "/bin-environment/ a\\${serverip_line}" "$PHP_ENV_CONFIG" - grep -qP "$php_error_line" "$PHP_ENV_CONFIG" || \ - sed -i "/bin-environment/ a\\${php_error_line}" "$PHP_ENV_CONFIG" - - echo "Added ENV to php:" - grep -E '(VIRTUAL_HOST|ServerIP|PHP_ERROR_LOG)' "$PHP_ENV_CONFIG" -} - -setup_web_port() { - local warning="WARNING: Custom WEB_PORT not used" - # Quietly exit early for empty or default - if [[ -z "${1}" || "${1}" == 
'80' ]] ; then return ; fi - - if ! echo $1 | grep -q '^[0-9][0-9]*$' ; then - echo "$warning - $1 is not an integer" - return - fi - - local -i web_port="$1" - if (( $web_port < 1 || $web_port > 65535 )); then - echo "$warning - $web_port is not within valid port range of 1-65535" - return - fi - echo "Custom WEB_PORT set to $web_port" - echo "INFO: Without proper router DNAT forwarding to $ServerIP:$web_port, you may not get any blocked websites on ads" - - # Update lighttpd's port - sed -i '/server.port\s*=\s*80\s*$/ s/80/'$WEB_PORT'/g' /etc/lighttpd/lighttpd.conf - -} - -load_web_password_secret() { - # If WEBPASSWORD is not set at all, attempt to read password from WEBPASSWORD_FILE, - # allowing secrets to be passed via docker secrets - if [ -z "${WEBPASSWORD+x}" ] && [ -n "${WEBPASSWORD_FILE}" ] && [ -r "${WEBPASSWORD_FILE}" ]; then - WEBPASSWORD=$(<"${WEBPASSWORD_FILE}") - fi; -} - -generate_password() { - if [ -z "${WEBPASSWORD+x}" ] ; then - # Not set at all, give the user a random pass - WEBPASSWORD=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8) - echo "Assigning random password: $WEBPASSWORD" - fi; -} - -setup_web_password() { - setup_var_exists "WEBPASSWORD" && return - - PASS="$1" - # Turn bash debug on while setting up password (to print it) - if [[ "$PASS" == "" ]] ; then - echo "" | pihole -a -p - else - echo "Setting password: ${PASS}" - set -x - pihole -a -p "$PASS" "$PASS" - fi - # Turn bash debug back off after print password setup - # (subshell to null hides printing output) - { set +x; } 2>/dev/null - - # To avoid printing this if conditional in bash debug, turn off debug above.. 
- # then re-enable debug if necessary (more code but cleaner printed output) - if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then - set -x - fi -} - -setup_ipv4_ipv6() { - local ip_versions="IPv4 and IPv6" - if [ "$IPv6" != "True" ] ; then - ip_versions="IPv4" - sed -i '/use-ipv6.pl/ d' /etc/lighttpd/lighttpd.conf - fi; - echo "Using $ip_versions" -} - -test_configs() { - set -e - echo -n '::: Testing pihole-FTL DNS: ' - sudo -u ${DNSMASQ_USER:-root} pihole-FTL test || exit 1 - echo -n '::: Testing lighttpd config: ' - lighttpd -t -f /etc/lighttpd/lighttpd.conf || exit 1 - set +e - echo "::: All config checks passed, cleared for startup ..." -} - -setup_blocklists() { - local blocklists="$1" - # Exit/return early without setting up adlists with defaults for any of the following conditions: - # 1. skip_setup_blocklists env is set - exit_string="(exiting ${FUNCNAME[0]} early)" - - if [ -n "${skip_setup_blocklists}" ]; then - echo "::: skip_setup_blocklists requested ($exit_string)" - return - fi - - # 2. 
The adlist file exists already (restarted container or volume mounted list) - if [ -f "${adlistFile}" ]; then - echo "::: Preexisting ad list ${adlistFile} detected ($exit_string)" - cat "${adlistFile}" - return - fi - - echo "::: ${FUNCNAME[0]} now setting default blocklists up: " - echo "::: TIP: Use a docker volume for ${adlistFile} if you want to customize for first boot" - installDefaultBlocklists - - echo "::: Blocklists (${adlistFile}) now set to:" - cat "${adlistFile}" -} - -setup_var_exists() { - local KEY="$1" - if [ -n "$2" ]; then - local REQUIRED_VALUE="[^\n]+" - fi - if grep -Pq "^${KEY}=${REQUIRED_VALUE}" "$setupVars"; then - echo "::: Pre existing ${KEY} found" - true - else - false - fi -} - -setup_temp_unit() { - local UNIT="$1" - # check if var is empty - if [[ "$UNIT" != "" ]] ; then - # check if we have valid units - if [[ "$UNIT" == "c" || "$UNIT" == "k" || $UNIT == "f" ]] ; then - pihole -a -${UNIT} - fi - fi -} - -setup_ui_layout() { - local LO=$1 - # check if var is empty - if [[ "$LO" != "" ]] ; then - # check if we have valid types boxed | traditional - if [[ "$LO" == "traditional" || "$LO" == "boxed" ]] ; then - change_setting "WEBUIBOXEDLAYOUT" "$WEBUIBOXEDLAYOUT" - fi - fi -} - -setup_admin_email() { - local EMAIL=$1 - # check if var is empty - if [[ "$EMAIL" != "" ]] ; then - pihole -a -e "$EMAIL" - fi -} diff --git a/build.yml b/build.yml deleted file mode 100644 index 0a33a7c92..000000000 --- a/build.yml +++ /dev/null @@ -1,49 +0,0 @@ -# Docker Compose build file: docker-compose -f build.yml build -version: "3.7" - -x-common-args: &common-args - PIHOLE_VERSION: ${PIHOLE_VERSION} - NAME: pihole/pihole - MAINTAINER: adam@diginc.us - S6_VERSION: v1.22.1.0 - PHP_ENV_CONFIG: /etc/lighttpd/conf-enabled/15-fastcgi-php.conf - PHP_ERROR_LOG: /var/log/lighttpd/error.log - - -services: - amd64: - image: pihole:${PIHOLE_VERSION}-amd64-${DEBIAN_VERSION:-stretch} - build: - context: . 
- args: - <<: *common-args - PIHOLE_BASE: pihole/debian-base:${DEBIAN_VERSION:-stretch} - PIHOLE_ARCH: amd64 - S6_ARCH: amd64 - armel: - image: pihole:${PIHOLE_VERSION}-armel-${DEBIAN_VERSION:-stretch} - build: - context: . - args: - <<: *common-args - PIHOLE_BASE: multiarch/debian-debootstrap:armel-${DEBIAN_VERSION:-stretch}-slim - PIHOLE_ARCH: armel - S6_ARCH: arm - armhf: - image: pihole:${PIHOLE_VERSION}-armhf-${DEBIAN_VERSION:-stretch} - build: - context: . - args: - <<: *common-args - PIHOLE_BASE: multiarch/debian-debootstrap:armhf-${DEBIAN_VERSION:-stretch}-slim - PIHOLE_ARCH: arm - S6_ARCH: arm - arm64: - image: pihole:${PIHOLE_VERSION}-arm64-${DEBIAN_VERSION:-stretch} - build: - context: . - args: - <<: *common-args - PIHOLE_BASE: multiarch/debian-debootstrap:arm64-${DEBIAN_VERSION:-stretch}-slim - PIHOLE_ARCH: arm64 - S6_ARCH: aarch64 diff --git a/docker-compose-jwilder-proxy.yml b/docker-compose-jwilder-proxy.yml deleted file mode 100644 index ee9c9e4d8..000000000 --- a/docker-compose-jwilder-proxy.yml +++ /dev/null @@ -1,60 +0,0 @@ -version: "3" - -# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md - -services: - jwilder-proxy: - image: jwilder/nginx-proxy - ports: - - '80:80' - environment: - DEFAULT_HOST: pihole.yourDomain.lan - volumes: - - '/var/run/docker.sock:/tmp/docker.sock' - restart: always - - pihole: - image: pihole/pihole:latest - ports: - - '53:53/tcp' - - '53:53/udp' - - "67:67/udp" - - '8053:80/tcp' - - "443:443/tcp" - volumes: - - './etc-pihole/:/etc/pihole/' - - './etc-dnsmasq.d/:/etc/dnsmasq.d/' - # run `touch ./var-log/pihole.log` first unless you like errors - # - './var-log/pihole.log:/var/log/pihole.log' - # Recommended but not required (DHCP needs NET_ADMIN) - # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities - cap_add: - - NET_ADMIN - environment: - ServerIP: 192.168.41.55 - PROXY_LOCATION: pihole - VIRTUAL_HOST: pihole.yourDomain.lan - VIRTUAL_PORT: 80 - extra_hosts: - # Resolve to nothing domains 
(terminate connection) - - 'nw2master.bioware.com nwn2.master.gamespy.com:0.0.0.0' - # LAN hostnames for other docker containers using jwilder - - 'yourDomain.lan:192.168.41.55' - - 'pihole pihole.yourDomain.lan:192.168.41.55' - - 'ghost ghost.yourDomain.lan:192.168.41.55' - - 'wordpress wordpress.yourDomain.lan:192.168.41.55' - restart: always - -# Another container you might want to have running through the proxy -# Note it also have ENV Vars like pihole and a host under pihole's extra_hosts -# ghost: -# image: fractalf/ghost -# ports: -# - '2368:2368/tcp' -# volumes: -# - '/etc/ghost/:/ghost-override' -# environment: -# PROXY_LOCATION: ghost -# VIRTUAL_HOST: ghost.yourDomain.lan -# VIRTUAL_PORT: 2368 -# restart: always diff --git a/docker-compose-traefik-proxy.md b/docker-compose-traefik-proxy.md deleted file mode 100644 index 48e7662b8..000000000 --- a/docker-compose-traefik-proxy.md +++ /dev/null @@ -1,111 +0,0 @@ -Please note the following about this [traefik](https://traefik.io/) example for Docker Pi-hole - -- Still requires standard Pi-hole setup steps, make sure you've gone through the [README](https://github.com/pi-hole/docker-pi-hole/blob/master/README.md) and understand how to setup Pi-hole without traefik first -- Update these things before using: - - set instances of `homedomain.lan` below to your home domain (typically set in your router) - - set your Pi-hole ENV WEBPASSWORD if you don't want a random admin pass -- This works for me, Your mileage may vary! 
-- For support, do your best to figure out traefik issues on your own: - - by looking at logs and traefik web interface on port 8080 - - also by searching the web and searching their forums/docker issues for similar question/problems -- Port 8053 is mapped directly to Pi-hole to serve as a back door without going through traefik -- There is some delay after starting your container before traefik forwards the HTTP traffic correctly, give it a minute - -``` -version: '3.8' - -services: - traefik: - container_name: traefik - domainname: homedomain.lan - - image: traefik:v2.2 - restart: unless-stopped - # Note I opt to whitelist certain apps for exposure to traefik instead of auto discovery - # use `--providers.docker.exposedbydefault=true` if you don't want to have to do this - command: - - "--providers.docker=true" - - "--providers.docker.network=discovery" - - "--providers.docker.exposedbydefault=false" - - "--api.insecure=true" - - "--api.dashboard=true" - - "--entrypoints.http.address=:80" - - "--log.level=DEBUG" - ports: - - "80:80" - - "443:443" - - "8080:8080" - volumes: - - /var/run/docker.sock:/var/run/docker.sock:ro - networks: - - default - - discovery - dns: - - 192.168.1.50 - - 192.168.1.1 - - pihole: - container_name: pihole - domainname: homedomain.lan - - image: pihole/pihole:latest - networks: - - discovery - ports: - - '0.0.0.0:53:53/tcp' - - '0.0.0.0:53:53/udp' - - '0.0.0.0:67:67/udp' - - '0.0.0.0:8053:80/tcp' - volumes: - - ./etc-pihole/:/etc/pihole/ - - ./etc-dnsmasqd/:/etc/dnsmasq.d/ - # run `touch ./pihole.log` first unless you like errors - # - ./pihole.log:/var/log/pihole.log - environment: - ServerIP: 192.168.1.50 - PROXY_LOCATION: pihole - VIRTUAL_HOST: pihole.homedomain.lan - VIRTUAL_PORT: 80 - TZ: 'America/Chicago' - # WEBPASSWORD: - restart: unless-stopped - labels: - # required when using --providers.docker.exposedbydefault=false - - "traefik.enable=true" - - "traefik.http.routers.pihole.rule=Host(`pihole.homedomain.lan`)" - - 
"traefik.http.routers.pihole.entrypoints=http" - - "traefik.docker.network=discovery" - - "traefik.http.services.pihole.loadbalancer.server.port=80" - -networks: - # Discovery is manually created to avoid forcing any order of docker-compose stack creation (`docker network create discovery`) - # allows other compose files to be seen by proxy - # Not required if you aren't using multiple docker-compose files... - discovery: - external: true -``` - -After running `docker-compose up -d` you should see this if you look at logs on traefik `docker-compose logs -f traefik` - -``` -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Provider event received {Status:health_status: healthy ID:3befdc0a97908de7a679109c8cf1d2a6bf8a78c9018faae697b7251f1ff38932 From:pihole/pihole:latest Type:container Action:health_status: healthy Actor:{ID:3befdc0a97908de7a679109c8cf1d2a6bf8a78c9018faae697b7251f1ff38932 Attributes:map[com.docker.compose.config-hash:b2785684a80ef0cc97b7c34697e239ad90ef68580f2cc286f183c95d966f6eae com.docker.compose.container-number:1 com.docker.compose.oneoff:False com.docker.compose.project:pi-hole com.docker.compose.project.config_files:docker-compose.yml com.docker.compose.project.working_dir:/opt/pi-hole com.docker.compose.service:pihole com.docker.compose.version:1.25.5 image:pihole/pihole:latest maintainer:adam@diginc.us name:pihole traefik.docker.network:discovery traefik.enable:true traefik.http.routers.pihole.entrypoints:http traefik.http.routers.pihole.rule:Host(`pihole.homedomain.lan`) traefik.http.services.pihole.loadbalancer.server.port:80 url:https://www.github.com/pi-hole/docker-pi-hole]} Scope:local Time:1589199915 TimeNano:1589199915511243989}" providerName=docker -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Filtering disabled container" providerName=docker container=traefik-pi-hole-c5847115be3d90c73a89824f80f1e6882bd8de60c50063f56be9d224192a14f4 -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Configuration received from 
provider docker: {\"http\":{\"routers\":{\"pihole\":{\"entryPoints\":[\"http\"],\"service\":\"pihole\",\"rule\":\"Host(`pihole.homedomain.lan`)\"}},\"services\":{\"pihole\":{\"loadBalancer\":{\"servers\":[{\"url\":\"http://172.18.0.3:80\"}],\"passHostHeader\":true}}}},\"tcp\":{},\"udp\":{}}" providerName=docker -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating middleware" middlewareType=Pipelining entryPointName=http routerName=pihole@docker serviceName=pihole middlewareName=pipelining -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating load-balancer" entryPointName=http routerName=pihole@docker serviceName=pihole -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating server 0 http://172.18.0.3:80" entryPointName=http serverName=0 routerName=pihole@docker serviceName=pihole -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Added outgoing tracing middleware pihole" entryPointName=http routerName=pihole@docker middlewareName=tracing middlewareType=TracingForwarder -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating middleware" entryPointName=http middlewareName=traefik-internal-recovery middlewareType=Recovery -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Added outgoing tracing middleware dashboard@internal" middlewareType=TracingForwarder entryPointName=traefik routerName=dashboard@internal middlewareName=tracing -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating middleware" entryPointName=traefik routerName=dashboard@internal middlewareName=dashboard_stripprefix@internal middlewareType=StripPrefix -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Adding tracing to middleware" entryPointName=traefik routerName=dashboard@internal middlewareName=dashboard_stripprefix@internal -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating middleware" middlewareName=dashboard_redirect@internal middlewareType=RedirectRegex entryPointName=traefik routerName=dashboard@internal -traefik 
| time="2020-05-11T12:25:15Z" level=debug msg="Setting up redirection from ^(http:\\/\\/[^:\\/]+(:\\d+)?)\\/$ to ${1}/dashboard/" middlewareName=dashboard_redirect@internal middlewareType=RedirectRegex entryPointName=traefik routerName=dashboard@internal -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Adding tracing to middleware" routerName=dashboard@internal middlewareName=dashboard_redirect@internal entryPointName=traefik -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Added outgoing tracing middleware api@internal" entryPointName=traefik routerName=api@internal middlewareName=tracing middlewareType=TracingForwarder -traefik | time="2020-05-11T12:25:15Z" level=debug msg="Creating middleware" entryPointName=traefik middlewareName=traefik-internal-recovery middlewareType=Recovery -traefik | time="2020-05-11T12:25:15Z" level=debug msg="No default certificate, generating one" - -``` - -Also your port 8080 should list the Route/Rule for pihole and backend-pihole container. 
diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 031eb493f..000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: "3" - -# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md - -services: - pihole: - container_name: pihole - image: pihole/pihole:latest - # For DHCP it is recommended to remove these ports and instead add: network_mode: "host" - ports: - - "53:53/tcp" - - "53:53/udp" - - "67:67/udp" - - "80:80/tcp" - - "443:443/tcp" - environment: - TZ: 'America/Chicago' - # WEBPASSWORD: 'set a secure password here or it will be random' - # Volumes store your data between container upgrades - volumes: - - './etc-pihole/:/etc/pihole/' - - './etc-dnsmasq.d/:/etc/dnsmasq.d/' - # run `touch ./var-log/pihole.log` first unless you like errors - # - './var-log/pihole.log:/var/log/pihole.log' - # Recommended but not required (DHCP needs NET_ADMIN) - # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities - cap_add: - - NET_ADMIN - restart: unless-stopped diff --git a/docker-pi-hole.cron b/docker-pi-hole.cron deleted file mode 100644 index 2e34964af..000000000 --- a/docker-pi-hole.cron +++ /dev/null @@ -1,31 +0,0 @@ -# Pi-hole: A black hole for Internet advertisements -# (c) 2015, 2016 by Jacob Salmela -# Network-wide ad blocking via your Raspberry Pi -# http://pi-hole.net -# Updates ad sources every week -# -# Pi-hole is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# This file is under source-control of the Pi-hole installation and update -# scripts, any changes made to this file will be overwritten when the softare -# is updated or re-installed. Please make any changes to the appropriate crontab -# or other cron file snippets. 
- -# Your container name goes here: -DOCKER_NAME=pihole -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# Pi-hole: Update the ad sources once a week on Sunday at 01:59 -# Download any updates from the adlists -59 1 * * 7 root PATH="$PATH:/usr/local/bin/" docker exec $DOCKER_NAME pihole updateGravity > /dev/null - -# Update docker-pi-hole by pulling the latest docker image ane re-creating your container. -# pihole software update commands are unsupported in docker! -#30 2 * * 7 root PATH="$PATH:/usr/local/bin/" docker exec $DOCKER_NAME pihole updatePihole > /dev/null - -# Pi-hole: Flush the log daily at 00:00 so it doesn't get out of control -# Stats will be viewable in the Web interface thanks to the cron job above -00 00 * * * root PATH="$PATH:/usr/local/bin/" docker exec $DOCKER_NAME pihole flush > /dev/null diff --git a/docker_run.sh b/docker_run.sh deleted file mode 100755 index cc0cd6ed7..000000000 --- a/docker_run.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md - -PIHOLE_BASE="${PIHOLE_BASE:-$(pwd)}" -[[ -d "$PIHOLE_BASE" ]] || mkdir -p "$PIHOLE_BASE" || { echo "Couldn't create storage directory: $PIHOLE_BASE"; exit 1; } - -# Note: ServerIP should be replaced with your external ip. 
-docker run -d \ - --name pihole \ - -p 53:53/tcp -p 53:53/udp \ - -p 80:80 \ - -p 443:443 \ - -e TZ="America/Chicago" \ - -v "${PIHOLE_BASE}/etc-pihole/:/etc/pihole/" \ - -v "${PIHOLE_BASE}/etc-dnsmasq.d/:/etc/dnsmasq.d/" \ - --dns=127.0.0.1 --dns=1.1.1.1 \ - --restart=unless-stopped \ - --hostname pi.hole \ - -e VIRTUAL_HOST="pi.hole" \ - -e PROXY_LOCATION="pi.hole" \ - -e ServerIP="127.0.0.1" \ - pihole/pihole:latest - -printf 'Starting up pihole container ' -for i in $(seq 1 20); do - if [ "$(docker inspect -f "{{.State.Health.Status}}" pihole)" == "healthy" ] ; then - printf ' OK' - echo -e "\n$(docker logs pihole 2> /dev/null | grep 'password:') for your pi-hole: https://${IP}/admin/" - exit 0 - else - sleep 3 - printf '.' - fi - - if [ $i -eq 20 ] ; then - echo -e "\nTimed out waiting for Pi-hole start, consult check your container logs for more info (\`docker logs pihole\`)" - exit 1 - fi -done; diff --git a/doco-example.yml b/doco-example.yml deleted file mode 120000 index 412c25747..000000000 --- a/doco-example.yml +++ /dev/null @@ -1 +0,0 @@ -docker-compose.yml \ No newline at end of file diff --git a/gh-actions-deploy.sh b/gh-actions-deploy.sh deleted file mode 100755 index 51e324048..000000000 --- a/gh-actions-deploy.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash -set -ex -# Github Actions Job for merging/deploying all architectures (post-test passing) -. 
gh-actions-vars.sh - -function annotate() { - local base=$1 - local image=$2 - local arch=$3 - local annotate_flags="${annotate_map[$arch]}" - - $dry docker manifest annotate ${base} ${image} --os linux ${annotate_flags} -} - -function create_manifest() { - local debian_version=$1 - local images=() - cd "${debian_version}" - - for arch in *; do - arch_image=$(cat "${arch}") - docker pull "${arch_image}" - images+=("${arch_image}") - done - - multiarch_images=$(get_multiarch_images) - for docker_tag in ${multiarch_images}; do - docker manifest create ${docker_tag} ${images[*]} - for arch in *; do - arch_image=$(cat "${arch}") - annotate "${docker_tag}" "${arch_image}" "${arch}" - done - - docker manifest inspect "${docker_tag}" - docker manifest push --purge "${docker_tag}" - done - cd ../ -} - -function get_multiarch_images() { - multiarch_images="${MULTIARCH_IMAGE}-${debian_version}" - if [[ "${debian_version}" == "${DEFAULT_DEBIAN_VERSION}" ]] ; then - # default debian version gets a non-debian tag as well as latest tag - multiarch_images="${multiarch_images} ${MULTIARCH_IMAGE} ${LATEST_IMAGE}" - fi - echo "${multiarch_images}" -} - - -# Keep in sync with build.yml names -declare -A annotate_map=( - ["amd64"]="--arch amd64" - ["armel"]="--arch arm --variant v6" - ["armhf"]="--arch arm --variant v7" - ["arm64"]="--arch arm64 --variant v8" -) - -mkdir -p ~/.docker -export DOCKER_CLI_EXPERIMENTAL='enabled' -echo "{}" | jq '.experimental="enabled"' | tee ~/.docker/config.json -# I tried to keep this login command outside of this script -# but for some reason auth would always fail in Github Actions. 
-# I think setting up a cred store would fix it -# https://docs.docker.com/engine/reference/commandline/login/#credentials-store -echo "${DOCKERHUB_PASS}" | docker login --username="${DOCKERHUB_USER}" --password-stdin -docker info - -ls -lat ./.gh-workspace/ -cd .gh-workspace - -for debian_version in *; do - create_manifest "${debian_version}" -done diff --git a/gh-actions-test.sh b/gh-actions-test.sh deleted file mode 100755 index 691ab16bc..000000000 --- a/gh-actions-test.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -set -ex - -# Script ran by Github actions for tests -# -# @environment ${ARCH} The architecture to build. Example: amd64. -# @environment ${DEBIAN_VERSION} Debian version to build. ('buster' or 'stretch'). -# @environment ${ARCH_IMAGE} What the Docker Hub Image should be tagged as. Example: pihole/pihole:master-amd64-stretch - -# setup qemu/variables -docker run --rm --privileged multiarch/qemu-user-static:register --reset > /dev/null -. gh-actions-vars.sh - -if [[ "$1" == "enter" ]]; then - enter="-it --entrypoint=sh" -fi - -# generate and build dockerfile -docker build --tag image_pipenv --file Dockerfile_build . -docker run --rm \ - --volume /var/run/docker.sock:/var/run/docker.sock \ - --volume "$(pwd):/$(pwd)" \ - --workdir "$(pwd)" \ - --env PIPENV_CACHE_DIR="$(pwd)/.pipenv" \ - --env ARCH="${ARCH}" \ - --env ARCH_IMAGE="${ARCH_IMAGE}" \ - --env DEBIAN_VERSION="${DEBIAN_VERSION}" \ - ${enter} image_pipenv - -mkdir -p ".gh-workspace/${DEBIAN_VERSION}/" -echo "${ARCH_IMAGE}" | tee "./.gh-workspace/${DEBIAN_VERSION}/${ARCH}" diff --git a/gh-actions-vars.sh b/gh-actions-vars.sh deleted file mode 100755 index 636169837..000000000 --- a/gh-actions-vars.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -set -a - -# @environment ${ARCH} The architecture to build. Defaults to 'amd64'. -# @environment ${DEBIAN_VERSION} Debian version to build. Defaults to 'stretch'. -# @environment ${DOCKER_HUB_REPO} The docker hub repo to tag images for. 
Defaults to 'pihole'. -# @environment ${DOCKER_HUB_IMAGE_NAME} The name of the resulting image. Defaults to 'pihole'. - -GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD | sed "s/\//-/g") -GIT_TAG=$(git describe --tags --exact-match 2> /dev/null || true) - -DEFAULT_DEBIAN_VERSION="stretch" - -if [[ -z "${ARCH}" ]]; then - ARCH="amd64" - echo "Defaulting arch to ${ARCH}" -fi - -if [[ -z "${DEBIAN_VERSION}" ]]; then - DEBIAN_VERSION="${DEFAULT_DEBIAN_VERSION}" - echo "Defaulting DEBIAN_VERSION to ${DEBIAN_VERSION}" -fi - -if [[ -z "${DOCKER_HUB_REPO}" ]]; then - DOCKER_HUB_REPO="pihole" - echo "Defaulting DOCKER_HUB_REPO to ${DOCKER_HUB_REPO}" -fi - -if [[ -z "${DOCKER_HUB_IMAGE_NAME}" ]]; then - DOCKER_HUB_IMAGE_NAME="pihole" - echo "Defaulting DOCKER_HUB_IMAGE_NAME to ${DOCKER_HUB_IMAGE_NAME}" -fi - -BASE_IMAGE="${DOCKER_HUB_REPO}/${DOCKER_HUB_IMAGE_NAME}" - -GIT_TAG="${GIT_TAG:-$GIT_BRANCH}" -ARCH_IMAGE="${BASE_IMAGE}:${GIT_TAG}-${ARCH}-${DEBIAN_VERSION}" -MULTIARCH_IMAGE="${BASE_IMAGE}:${GIT_TAG}" - - - -# To get latest released, cut a release on https://github.com/pi-hole/docker-pi-hole/releases (manually gated for quality control) -latest_tag='UNKNOWN' -if ! 
latest_tag=$(curl -sI https://github.com/pi-hole/docker-pi-hole/releases/latest | grep --color=never -i Location | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then - print "Failed to retrieve latest docker-pi-hole release metadata" -else - if [[ "${GIT_TAG}" == "${latest_tag}" ]] ; then - LATEST_IMAGE="${BASE_IMAGE}:latest" - fi -fi - - -set +a diff --git a/install.sh b/install.sh deleted file mode 100644 index 7a10d345d..000000000 --- a/install.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -ex - -mkdir -p /etc/pihole/ -mkdir -p /var/run/pihole -# Production tags with valid web footers -export CORE_VERSION="$(cat /etc/docker-pi-hole-version)" -export WEB_VERSION="v5.1.1" - -# Only use for pre-production / testing -export CHECKOUT_BRANCHES=false -# Search for release/* branch naming convention for custom checkouts -if [[ "$CORE_VERSION" == *"release/"* ]] ; then - CHECKOUT_BRANCHES=true -fi - -apt-get update -apt-get install --no-install-recommends -y curl procps ca-certificates -# curl in armhf-buster's image has SSL issues. Running c_rehash fixes it. -# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=923479 -c_rehash -ln -s `which echo` /usr/local/bin/whiptail -curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / -mv /init /s6-init - -# debconf-apt-progress seems to hang so get rid of it too -which debconf-apt-progress -mv "$(which debconf-apt-progress)" /bin/no_debconf-apt-progress - -# Get the install functions -curl https://raw.githubusercontent.com/pi-hole/pi-hole/${CORE_VERSION}/automated%20install/basic-install.sh > "$PIHOLE_INSTALL" -PH_TEST=true . 
"${PIHOLE_INSTALL}" - -# Preseed variables to assist with using --unattended install -{ - echo "PIHOLE_INTERFACE=eth0" - echo "IPV4_ADDRESS=0.0.0.0" - echo "IPV6_ADDRESS=0:0:0:0:0:0" - echo "PIHOLE_DNS_1=8.8.8.8" - echo "PIHOLE_DNS_2=8.8.4.4" - echo "QUERY_LOGGING=true" - echo "INSTALL_WEB_SERVER=true" - echo "INSTALL_WEB_INTERFACE=true" - echo "LIGHTTPD_ENABLED=true" -}>> "${setupVars}" -source $setupVars - -export USER=pihole -distro_check - -# fix permission denied to resolvconf post-inst /etc/resolv.conf moby/moby issue #1297 -apt-get -y install debconf-utils -echo resolvconf resolvconf/linkify-resolvconf boolean false | debconf-set-selections - -ln -s /bin/true /usr/local/bin/service -bash -ex "./${PIHOLE_INSTALL}" --unattended -rm /usr/local/bin/service - -# IPv6 support for nc openbsd better than traditional -apt-get install -y --force-yes netcat-openbsd - -fetch_release_metadata() { - local directory="$1" - local version="$2" - pushd "$directory" - git fetch -t - git remote set-branches origin '*' - git fetch --depth 10 - git checkout master - git reset --hard "$version" - popd -} - -if [[ $CHECKOUT_BRANCHES == true ]] ; then - ln -s /bin/true /usr/local/bin/service - ln -s /bin/true /usr/local/bin/update-rc.d - echo "${CORE_VERSION}" | sudo tee /etc/pihole/ftlbranch - echo y | bash -x pihole checkout core ${CORE_VERSION} - echo y | bash -x pihole checkout web ${WEB_VERSION} - # echo y | bash -x pihole checkout ftl ${CORE_VERSION} - # If the v is forgotten: ${CORE_VERSION/v/} - unlink /usr/local/bin/service - unlink /usr/local/bin/update-rc.d -else - # Reset to our tags so version numbers get detected correctly - fetch_release_metadata "${PI_HOLE_LOCAL_REPO}" "${CORE_VERSION}" - fetch_release_metadata "${webInterfaceDir}" "${WEB_VERSION}" -fi - -# FTL Armel fix not in prod yet -# Remove once https://github.com/pi-hole/pi-hole/commit/3fbb0ac8dde14b8edc1982ae3a2a021f3cf68477 is in master -if [[ "$ARCH" == 'armel' ]]; then - curl -o /usr/bin/pihole-FTL 
https://ftl.pi-hole.net/development/pihole-FTL-armel-native -fi - -sed -i 's/readonly //g' /opt/pihole/webpage.sh -sed -i '/^WEBPASSWORD/d' /etc/pihole/setupVars.conf - -# Replace the call to `updatePiholeFunc` in arg parse with new `unsupportedFunc` -sed -i $'s/helpFunc() {/unsupportedFunc() {\\\n echo "Function not supported in Docker images"\\\n exit 0\\\n}\\\n\\\nhelpFunc() {/g' /usr/local/bin/pihole -sed -i $'s/)\s*updatePiholeFunc/) unsupportedFunc/g' /usr/local/bin/pihole - -touch /.piholeFirstBoot - -echo 'Docker install successful' diff --git a/pihole.service b/pihole.service deleted file mode 100644 index b4cbad770..000000000 --- a/pihole.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=pihole -Requires=docker.service multi-user.target -After=docker.service network-online.target dhcpd.service - -[Service] -Restart=always -ExecStart=/usr/bin/docker start -a pihole -ExecStop=/usr/bin/docker stop -t 2 pihole - -[Install] -WantedBy=multi-user.target - diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index db310133a..000000000 --- a/requirements.txt +++ /dev/null @@ -1,53 +0,0 @@ -apipkg==1.5 -atomicwrites==1.3.0 -attrs==19.3.0 -backports.shutil-get-terminal-size==1.0.0 -backports.ssl-match-hostname==3.7.0.1 -bcrypt==3.1.7 -cached-property==1.5.1 -certifi==2019.11.28 -cffi==1.13.2 -chardet==3.0.4 -configparser==4.0.2 -contextlib2==0.6.0.post1 -coverage==5.0.1 -cryptography==2.8 -docker==4.1.0 -dockerpty==0.4.1 -docopt==0.6.2 -enum34==1.1.6 -execnet==1.7.1 -filelock==3.0.12 -funcsigs==1.0.2 -idna==2.8 -importlib-metadata==1.3.0 -ipaddress==1.0.23 -Jinja2==2.10.3 -jsonschema==3.2.0 -MarkupSafe==1.1.1 -more-itertools==5.0.0 -packaging==19.2 -pathlib2==2.3.5 -pluggy==0.13.1 -py==1.8.1 -pycparser==2.19 -pyparsing==2.4.6 -pyrsistent==0.15.6 -pytest==4.6.8 -pytest-cov==2.8.1 -pytest-forked==1.1.3 -pytest-xdist==1.31.0 -PyYAML==5.2 -requests==2.22.0 -scandir==1.10.0 -six==1.13.0 -subprocess32==3.5.4 -testinfra==3.3.0 
-texttable==1.6.2 -toml==0.10.0 -tox==3.14.3 -urllib3==1.25.7 -virtualenv==16.7.9 -wcwidth==0.1.7 -websocket-client==0.57.0 -zipp==0.6.0 diff --git a/root/etc/cont-init.d/10-permissions.sh b/root/etc/cont-init.d/10-permissions.sh new file mode 100755 index 000000000..89e08a16e --- /dev/null +++ b/root/etc/cont-init.d/10-permissions.sh @@ -0,0 +1,46 @@ +#!/bin/bash +set -eu + +# reown [NAME] [NEW ID] +function reown { + kind="$1" + name="$2" + new="$3" + + case "$kind" in + user) + old=$(getent passwd "$name" | cut -d: -f3) + own="chown" + mod="usermod -o -u" + ;; + group) + old=$(getent group "$name" | cut -d: -f3) + own="chgrp" + mod="groupmod -o -g" + ;; + *) + echo "bad argument" >/dev/stderr + exit 1 + ;; + esac + + if [ $old -eq $new ]; then + echo "not changing id of $kind $name, it already matches host" + return + fi + + echo "changing id of $kind $name from $old to $new" + $mod $new $name + + for path in $(find / "-$kind" "$old" 2>/dev/null); do + echo "fixing ownership of $path" + $own "$name" "$path" + done +} + +# TODO: We shouldn't assign these accounts the same UIDs +[[ -n ${WWWDATA_UID+x} ]] && reown user www-data ${WWWDATA_UID} +[[ -n ${WWWDATA_GID+x} ]] && reown group www-data ${WWWDATA_GID} + +[[ -n ${PIHOLE_UID+x} ]] && reown user pihole ${PIHOLE_UID} +[[ -n ${PIHOLE_GID+x} ]] && reown group pihole ${PIHOLE_GID} diff --git a/root/etc/cont-init.d/20-startup.pl b/root/etc/cont-init.d/20-startup.pl new file mode 100755 index 000000000..56e99f364 --- /dev/null +++ b/root/etc/cont-init.d/20-startup.pl @@ -0,0 +1,807 @@ +#!/usr/bin/with-contenv perl + +use 5.010; +use strict; +use warnings; +no warnings "experimental"; + +use Socket qw(AF_INET AF_INET6 SOCK_STREAM); +use Data::Dumper; +use File::Find qw(find); +use File::Temp qw(tempfile); +use Carp qw(croak); + +############################################################################### + +{ package Cvar; + + sub env ($$;\%) { + my ($class, $name, %env) = @_; + %env = %ENV unless %env; + + return 
bless { + _type => "env", + _name => $name, + _env => \%env + }, $class; + } + + sub lit ($$$) { + my ($class, $val) = @_; + + return bless { + _type => "lit", + _val => $val + }, $class; + } + + sub name { + my $self = shift; + return ($self->{_type} eq "env") ? + $self->{_name} : + undef; + } + + sub val { + my $self = shift; + return ($self->{_type} eq "env") ? + $self->{_env}{$self->{_name}} : + $self->{_val}; + } + + sub set { + my $self = shift; + # TODO + } + + sub delete { + my $self = shift; + return ($self->{_type} eq "env") ? + delete $self->{_env}{$self->{_name}} : + delete $self->{_val}; + } + + sub exists { + my $self = shift; + return ($self->{_type} eq "env") ? + exists $self->{_env}{$self->{_name}} : + exists $self->{_val}; + } +} + +############################################################################### + +my %FILES; +my $PIHOLE_CONF = "/etc/pihole/setupVars.conf"; +my $FTL_CONF = "/etc/pihole/pihole-FTL.conf"; +my $DNSMASQ_CONF = "/etc/dnsmasq.d/01-pihole.conf"; + +sub env ($;\%) { return Cvar->env(@_); } +sub lit ($) { return Cvar->lit(@_); } + +sub configure ($$$$@); +sub configure_admin_email ($); +sub configure_blocklists (); +sub configure_dhcp (); +sub configure_dns_defaults (); +sub configure_dns_hostname ($$@); +sub configure_dns_fqdn ($); +sub configure_dns_priv ($); +sub configure_dns_dnssec ($); +sub configure_dns_forwarding ($$$$); +sub configure_dns_interface ($$); +sub configure_dns_upstream ($@); +sub configure_dns_user ($); +sub configure_ftl ($$$@); +sub configure_network (\%$$); +sub configure_pihole ($$$@); +sub configure_temperature ($); +sub configure_web_address ($$$); +sub configure_web_fastcgi ($$); +sub configure_web_password ($$); +sub configure_whitelists (); +sub do_or_die (@); +sub explain (@); +sub fix_capabilities ($); +sub fix_permissions ($); +sub mask ($$); +sub print_env(\%); +sub read_conf ($); +sub read_file ($); +sub sed (&$@); +sub set_defaults (\%); +sub test_configuration ($); +sub trim ($); +sub 
validate ($$$@); +sub validate_ip ($); +sub write_conf ($@); +sub write_file ($@); + +############################################################################### + +sub configure ($$$$@) { + my $path = shift; + my $name = shift; # Variable name written to output + my $reqd = shift; + my $cvar = shift; + my @allow = @_; + + validate($name, $reqd, $cvar, @allow); + + my @conf = grep {!/^$name=/} read_conf($path); + push @conf, "$name=" . ($cvar->val() // ""); + chomp @conf; + + write_conf($path, @conf); +} + +sub configure_admin_email ($) { + my ($email) = @_; + do_or_die("pihole", "-a", "-e", $email->val()) if $email->exists(); +} + +sub configure_blocklists () { + my $path = "/etc/pihole/adlists.list"; + return if -f $path; + + my @items = (); + push @items, "https://dbl.oisd.nl/\n"; + write_conf($path, @items); +} + +sub configure_dhcp() { +} + +sub configure_dns_defaults () { + do_or_die("cp", "-f", "/etc/.pihole/advanced/01-pihole.conf", $DNSMASQ_CONF); +} + +sub configure_dns_hostname ($$@) { + my $ipv4 = shift; + my $ipv6 = shift; + my @names = @_; + + my @dnsmasq = read_conf($DNSMASQ_CONF); + @dnsmasq = grep {!/local\.list/} @dnsmasq; + + write_conf($DNSMASQ_CONF, @dnsmasq); +} + +sub configure_dns_fqdn ($) { + my ($fqdn) = @_; + + configure_pihole("DNS_FQDN_REQUIRED", 0, $fqdn, "true", "false"); + + my @dnsmasq = grep {!/^domain-needeed/} read_conf($DNSMASQ_CONF); + push @dnsmasq, "domain-needed" + unless ($fqdn->exists() and $fqdn->val() eq "false"); + + write_conf($DNSMASQ_CONF, @dnsmasq); +} + +sub configure_dns_priv ($) { + my ($priv) = @_; + + configure_pihole("DNS_BOGUS_PRIV", 0, $priv, "true", "false"); + + my @dnsmasq = grep {!/^bogus-priv/} read_conf($DNSMASQ_CONF); + push @dnsmasq, "bogus-priv" + unless ($priv->exists() and $priv->val() eq "false"); + + write_conf($DNSMASQ_CONF, @dnsmasq); +} + +sub configure_dns_dnssec ($) { + my ($dnssec) = @_; + + configure_pihole("DNSSEC", 0, $dnssec, "true", "false"); + + my @dnsmasq = 
read_conf($DNSMASQ_CONF); + @dnsmasq = grep {!/^dnssec/} @dnsmasq; + @dnsmasq = grep {!/^trust-anchor=/} @dnsmasq; + + if ($dnssec->exists() and $dnssec->val() eq "true") { + push @dnsmasq, "dnssec"; + push @dnsmasq, "trust-anchor=.,20326,8,2,E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D"; + } + + write_conf($DNSMASQ_CONF, @dnsmasq); +} + +sub configure_dns_forwarding ($$$$) { + my ($enable, $upstream, $network, $domain) = @_; + + my @pihole = read_conf($PIHOLE_CONF); + @pihole = grep {!/^REV_SERVER/} @pihole; + @pihole = grep {!/^CONDITIONAL/} @pihole; + + my @dnsmasq = read_conf($DNSMASQ_CONF); + @dnsmasq = grep {!/^rev-server=/} @dnsmasq; + @dnsmasq = grep {!/^server=/} @dnsmasq; + + if ($enable->exists() and $enable->val() eq "true") { + validate("REV_SERVER_TARGET", 1, $upstream);#, \&validate_ip); TODO + validate("REV_SERVER_CIDR", 1, $network);#, \&validate_cidr); TODO + + push @pihole, "REV_SERVER=true"; + push @pihole, "REV_SERVER_CIDR=".$network->val(); + push @pihole, "REV_SERVER_TARGET=".$upstream->val(); + push @pihole, "REV_SERVER_DOMAIN=".($domain->val() // ""); + + push @dnsmasq, sprintf("rev-server=%s,%s", $network->val(), $upstream->val()); + push @dnsmasq, sprintf("server=/%s/%s", $domain->val(), $upstream->val()) + if ($domain->exists() and $domain->val()); + } + + write_conf($DNSMASQ_CONF, @dnsmasq); + write_conf($PIHOLE_CONF, @pihole); +} + +sub configure_dns_interface ($$) { + my ($iface, $listen) = @_; + + configure_pihole("PIHOLE_INTERFACE", 0, $iface); + configure_pihole("DNSMASQ_LISTENING", 0, $listen, "all", "local", "iface"); + + my @dnsmasq = read_conf($DNSMASQ_CONF); + @dnsmasq = grep {!/^interface=/} @dnsmasq; + @dnsmasq = grep {!/^local-service/} @dnsmasq; + @dnsmasq = grep {!/^except-interface=/} @dnsmasq; + + given ($listen->val() // "all") { + when ("all") { push @dnsmasq, "except-interface=nonexisting"; } + when ("local") { push @dnsmasq, "local-service"; } + when ("iface") { + $iface->exists() or 
croak(sprintf("%s must be set when %s is '%s'", + $iface->name(), $listen->name(), $listen->val())); + push @dnsmasq, "interface=".$iface->val(); + } + } + + write_conf($DNSMASQ_CONF, @dnsmasq); +} + +sub configure_dns_upstream ($@) { + my @dnsmasq = grep {!/^server=/} read_conf($DNSMASQ_CONF); + my @pihole = grep {!/PIHOLE_DNS_\d/} read_conf($PIHOLE_CONF); + my $count = 0; + + + foreach $_ (@_) { + next unless $_->exists(); + # validate_ip($_); TODO Need to remove optional port number + + push @pihole, sprintf("PIHOLE_DNS_%s=%s", ++$count, $_->val()); + push @dnsmasq, "server=".$_->val(); + $count ++; + } + + # No values given (or all were empty) + validate("PIHOLE_DNS_1", 1, $_[0]) unless $count; + + write_conf($PIHOLE_CONF, @pihole); + write_conf($DNSMASQ_CONF, @dnsmasq); +} + +sub configure_dns_user ($) { + my ($dns_user) = @_; + + # Erase any user= directives in config snippet files + find(sub { + write_file($_, grep {!/^user=/} read_file($_)) if -f; + }, "/etc/dnsmasq.d"); + + configure("/etc/dnsmasq.conf", "user", 1, $dns_user); +} + +sub configure_ftl ($$$@) { + return &configure($FTL_CONF, @_); +} + +sub configure_network (\%$$) { + my ($env, $ipv4, $ipv6) = @_; + my %env = %{$env}; + my $sock; + + if ($ipv4->exists() and $ipv4->val() eq "auto") { + my $output = `ip route get 1.1.1.1 2>/dev/null`; + + if ($? == 0) { + my ($gw) = $output =~ m/via\s+([^\s]+)/; + my ($if) = $output =~ m/dev\s+([^\s]+)/; + my ($ip) = $output =~ m/src\s+([^\s]+)/; + + say sprintf("Detected %s (auto): %s", $ipv4->name(), $ip); + $ipv4->set($ip); + } else { + say sprintf("Failed to auto-detect %s: %s", $ipv4->name, $output); + $ipv4->delete(); + } + } + + if ($ipv6->exists() and $ipv6->val() eq "auto") { + my $output = `ip route get 2606:4700:4700::1001 2>/dev/null`; + + if ($? 
== 0) { + my ($gw) = $output =~ m/via\s+([^\s]+)/; + my ($if) = $output =~ m/dev\s+([^\s]+)/; + my ($ip) = $output =~ m/src\s+([^\s]+)/; + + # TODO sanitize + my @output = `ip -6 addr show dev '$if'` + or explain("ip -6 addr show dev '$if'"); + + my @gua = (); # global unique addresses + my @ula = (); # unique local addresses + my @ll = (); # link local addresses + + foreach (grep {/inet6/} @output) { + my ($ip) = m{inet6\s+([^/])+/}; + my ($chazwazza) = $ip =~ /^([^:]+):/; + $chazwazza = hex($chazwazza); + + push @ula, $ip if (($chazwazza & mask( 7, 16)) == 0xfc00); + push @gua, $ip if (($chazwazza & mask( 3, 16)) == 0x2000); + push @ll, $ip if (($chazwazza & mask(10, 16)) == 0xfe80); + } + + Dumper[@gua]; + Dumper[@ula]; + Dumper[@ll]; + + # TODO + # say sprintf("Detected %s (auto): %s", $ipv6->name(), $ip); + # $ipv6->set($ip); + } else { + say sprintf("Failed to auto-detect %s: %s", $ipv6->name(), $output); + $ipv6->delete(); + } + } + + if ($ipv4->exists() and $ipv4->val() eq "0.0.0.0") { + # This is interpreted as "listen on any IPv4 address, if one exists", so + # we first need to check if one exists. + if (!socket($sock, AF_INET, SOCK_STREAM, 0) or !`ip -4 addr`) { + say sprintf("Determined IPv4 is not available; deleting %s='%s'", + $ipv4->name(), $ipv4->val()); + $ipv4->delete(); + } + } + + if ($ipv6->exists() and $ipv6->val() eq "::") { + # This is interpreted as "listen on any IPv6 address, if one exists", so + # we first need to check if one exists. 
+ if (!socket($sock, AF_INET6, SOCK_STREAM, 0) or !`ip -6 addr`) { + say sprintf("Determined IPv6 is not available; deleting %s='%s'", + $ipv6->name(), $ipv6->val()); + $ipv6->delete(); + } + } + + croak sprintf("Neither %s nor %s are configured", $ipv4->name, $ipv6->name) + unless ($ipv4->exists() or $ipv6->exists()); + + validate_ip($ipv4); + validate_ip($ipv6); + + configure_pihole("IPV4_ADDRESS", 0, $ipv4); + configure_pihole("IPV6_ADDRESS", 0, $ipv6); +} + +# Change an option in setupVars.conf +sub configure_pihole ($$$@) { + return &configure($PIHOLE_CONF, @_); +} + +sub configure_temperature ($) { + my ($unit) = @_; + configure_pihole("PIHOLE_TEMPERATURE_UNIT", 0, $unit, "K", "F", "C"); +} + +sub configure_web_address ($$$) { + my ($ipv4, $ipv6, $port) = @_; + my $path = "/etc/lighttpd/lighttpd.conf"; + my @lighttpd = read_conf($path); + + validate("WEB_PORT", 1, $port); + croak sprintf("%s='%s' is invalid, must be 1-65535", $port->name(), $port->val()) + unless ($port->val() =~ /\A\d+\z/ and $port->val() > 0 and $port->val() <= 65535); + + @lighttpd = grep {!/^\$SERVER\["socket"\]/} @lighttpd; + @lighttpd = grep {!/^server\.port\s*=/} @lighttpd; + @lighttpd = grep {!/^server\.bind\s*=/} @lighttpd; + @lighttpd = grep {!/use-ipv6/} @lighttpd; + + push @lighttpd, "server.port = ".$port->val(); + push @lighttpd, 'server.use-ipv6 = "enable"' if $ipv6->exists(); + + if ($ipv4->exists() and $ipv4->val() eq "0.0.0.0") { + push @lighttpd, 'server.bind = "0.0.0.0"'; + } else { + push @lighttpd, 'server.bind = "127.0.0.1"'; + push @lighttpd, sprintf('$SERVER["socket"] == "%s:%s" { }', $ipv4->val(), $port->val()) if $ipv4->exists(); + push @lighttpd, sprintf('$SERVER["socket"] == "[%s]:%s" { }', $ipv6->val(), $port->val()) if $ipv6->exists(); + } + + write_conf($path, @lighttpd); +} + +sub configure_web_fastcgi ($$) { + my ($ipv4, $host) = @_; + my $path = "/etc/lighttpd/conf-enabled/15-fastcgi-php.conf"; + my @fastcgi = read_conf($path); + + @fastcgi = grep 
{!/^\s*"PHP_ERROR_LOG"/ } @fastcgi; + @fastcgi = grep {!/^\s*"VIRTUAL_HOST"/ } @fastcgi; + @fastcgi = grep {!/^\s*"ServerIP"/ } @fastcgi; + + my @env; + push @env, "\t\t\"bin-environment\" => ("; + push @env, sprintf('%s"VIRTUAL_HOST" => "%s",', "\t\t\t", $host->val()); + push @env, sprintf('%s"ServerIP" => "%s",', "\t\t\t", $ipv4->val()) if $ipv4->exists(); + push @env, sprintf('%s"PHP_ERROR_LOG" => "%s",', "\t\t\t", "/var/log/lighttpd/error.log"); + + @fastcgi = sed {/"bin-environment"/} \@env, @fastcgi; + write_conf($path, @fastcgi); +} + +sub configure_web_password ($$) { + my ($pw, $pwfile) = @_; + + if ($pwfile->exists() and -f $pwfile->val() and -s $pwfile->val()) { + $pw = lit(read_file($pwfile)); + chomp $pw; + } + + if (!$pw->exists()) { + $pw = lit(trim(`openssl rand -base64 20`)); + say "Generated new random web admin password: ".$pw->val(); + } + + do_or_die("pihole", "-a", "-p", $pw); +} + +# TODO this file isn't used (yet) +sub configure_whitelists () { + my $path = "/etc/pihole/whitelists.list"; + return if -f $path; + + my @items = (); + push @items, "https://github.com/anudeepND/whitelist/blob/master/domains/optional-list.txt"; + push @items, "https://github.com/anudeepND/whitelist/blob/master/domains/referral-sites.txt"; + push @items, "https://github.com/anudeepND/whitelist/blob/master/domains/whitelist.txt"; + write_conf($path, join("\n", @items)."\n"); +} + +sub do_or_die (@) { + say "+ ".join(" ", @_) if exists $ENV{"PIHOLE_DEBUG"}; + system(@_) and explain(@_); +} + +# Explain how a call to system() failed, then abort +sub explain (@) { + ($? == -1) or croak join(" ", @_)." failed to execute: ".$!; + ($? & 0x7f) or croak join(" ", @_)." died with signal ". ($? & 0x7f); + croak join(" ", @_)." failed with exit code ".($? 
>> 8); +} + +sub fix_capabilities ($) { + my ($dns_user) = @_; + my $ftl_path = trim(`which pihole-FTL`); + + do_or_die("setcap", "CAP_SYS_NICE,CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN+ei", $ftl_path) + if ($dns_user ne "root"); +} + +sub fix_permissions ($) { + my $dns = $_[0]->val(); + my $www = "www-data"; + + my @files = ( + {type=>"d", path=>"/etc/lighttpd", uid=>"root", gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/etc/pihole", uid=>$dns, gid=>"root", mode=>"0755"}, # TODO + {type=>"d", path=>"/var/cache/lighttpd/compress", uid=>$www, gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/var/cache/lighttpd/uploads", uid=>$www, gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/var/log", uid=>"root", gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/var/log/lighttpd", uid=>$www, gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/var/log/pihole", uid=>$dns, gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/run/lighttpd", uid=>$www, gid=>"root", mode=>"0755"}, + {type=>"d", path=>"/run/pihole", uid=>$dns, gid=>"root", mode=>"0755"}, + {type=>"f", path=>"/etc/pihole/custom.list", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/etc/pihole/dhcp.leases", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/etc/pihole/dns-servers.conf", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/etc/pihole/pihole-FTL.conf", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/etc/pihole/regex.list", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/etc/pihole/setupVars.conf", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/var/log/pihole.log", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/var/log/pihole-FTL.log", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/var/log/lighttpd/access.log", uid=>$www, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/var/log/lighttpd/error.log", uid=>$www, gid=>"root", mode=>"0644"}, + {type=>"f", path=>"/run/pihole-FTL.pid", uid=>$dns, gid=>"root", 
mode=>"0644"}, + {type=>"f", path=>"/run/pihole-FTL.port", uid=>$dns, gid=>"root", mode=>"0644"}, + {type=>"x", path=>"/run/pihole/FTL.sock"} + ); + + my %grouped = ( + touch => [], + mkdir => [], + rm => [], + uid => {}, + gid => {}, + mode => {}); + + foreach (@files) { + push(@{$grouped{touch}}, $_->{path}) if ($_->{type} eq "f"); + push(@{$grouped{mkdir}}, $_->{path}) if ($_->{type} eq "d"); + push(@{$grouped{rm}}, $_->{path}) if ($_->{type} eq "x"); + + if ($_->{type} ne "x") { + $grouped{uid }{$_->{uid }} = () if !defined $grouped{uid }{$_->{uid }}; + $grouped{gid }{$_->{gid }} = () if !defined $grouped{gid }{$_->{gid }}; + $grouped{mode}{$_->{mode}} = () if !defined $grouped{mode}{$_->{mode}}; + + push(@{$grouped{uid }{$_->{uid }}}, $_->{path}); + push(@{$grouped{gid }{$_->{gid }}}, $_->{path}); + push(@{$grouped{mode}{$_->{mode}}}, $_->{path}); + } + } + + do_or_die("mkdir", "-p", @{$grouped{mkdir}}) if @{$grouped{mkdir}}; + do_or_die("touch", @{$grouped{touch}}) if @{$grouped{touch}}; + do_or_die("rm", "-rf", @{$grouped{rm}}) if @{$grouped{rm}}; + + foreach $_ (keys %{$grouped{uid }}) { do_or_die("chown", $_, @{$grouped{uid }{$_}}); } + foreach $_ (keys %{$grouped{gid }}) { do_or_die("chgrp", $_, @{$grouped{gid }{$_}}); } + foreach $_ (keys %{$grouped{mode}}) { do_or_die("chmod", $_, @{$grouped{mode}{$_}}); } + + do_or_die("cp", "-f", "/etc/pihole/setupVars.conf", "/etc/pihole/setupVars.conf.bak"); +} + +sub mask ($$) { + my ($bits, $size) = @_; + return ((1 << $bits) - 1) << ($size - $bits); +} + +sub print_env(\%) { + my %env = %{$_[0]}; + + say "Environment:"; + foreach my $k (sort (keys %env)) { + printf " %-50s= %s\n", $k, ($env{$k} // "undef"); + } +} + +sub read_conf ($) { + my ($path) = @_; + + @{$FILES{$path}} = read_file($path) + unless (exists $FILES{$path}); + + return @{$FILES{$path}} +} + +sub read_file ($) { + my ($path) = @_; + local @ARGV = @_; + croak "$path does not exist" unless (-e $path); + croak "$path is not a file" unless (-f 
$path); + croak "$path is not readable" unless (-r $path); + return map { chomp; $_ } <>; +} + +sub sed (&$@) { + my $test = shift; + my $swap = shift; + my @result; + my $swappd; + + foreach $_ (@_) { + if (&$test) { + $swappd = (ref $swap eq "CODE") ? &$swap : $swap; + + given (ref $swappd) { + when ("") { push @result, $swappd; } + when ("ARRAY") { push @result, @$swappd; } + when ("SCALAR") { push @result, $$swappd; } + default { croak "wrong type"; } + } + } else { + push @result, $_; + } + } + + return @result; +} + +sub set_defaults (\%) { + my ($env) = @_; + + # TODO: Default value set here isn't read by services.d/pihole-FTL/run + exists $env->{"PIHOLE_DNS_USER"} + or croak("PIHOLE_DNS_USER should be set in Dockerfile, docker-compose.yml, or passed via docker run -e..."); + + $env->{"PIHOLE_ADMIN_EMAIL" } //= "root\@example.com"; + $env->{"PIHOLE_DNS_BLOCKING_MODE" } //= "NULL"; + $env->{"PIHOLE_DNS_BOGUS_PRIV" } //= "true"; + $env->{"PIHOLE_DNS_CNAME_INSPECT" } //= "true"; + $env->{"PIHOLE_DNS_PRIVACY_LVL" } //= "0"; + $env->{"PIHOLE_DNS_DNSSEC" } //= "true"; + $env->{"PIHOLE_DNS_FQDN_REQUIRED" } //= "true"; + $env->{"PIHOLE_DNS_IGNORE_LOCALHOST" } //= "false"; + $env->{"PIHOLE_DNS_LOG_QUERIES", } //= "true"; + $env->{"PIHOLE_DNS_UPSTREAM_1" } //= "1.1.1.1"; + $env->{"PIHOLE_LISTEN" } //= "all"; + $env->{"PIHOLE_TEMPERATURE_UNIT" } //= "F"; + $env->{"PIHOLE_WEB_ENABLED" } //= "true"; + $env->{"PIHOLE_WEB_HOSTNAME" } //= trim(`hostname -f 2>/dev/null || hostname`); + $env->{"PIHOLE_WEB_INSTALL_SERVER" } //= "true"; + $env->{"PIHOLE_WEB_INSTALL_UI" } //= "true"; + $env->{"PIHOLE_WEB_PORT", } //= "80"; + $env->{"PIHOLE_WEB_UI" } //= "boxed"; +} + +sub test_configuration ($) { + my ($dns_user) = @_; + + say "\n\n$PIHOLE_CONF"; + do_or_die("cat", "-n", $PIHOLE_CONF); + + say "\n\n$FTL_CONF"; + do_or_die("cat", "-n", $FTL_CONF); + + say "\n\n$DNSMASQ_CONF"; + do_or_die("cat", "-n", $DNSMASQ_CONF); + + say "\n\n/etc/dnsmasq.conf"; + do_or_die("cat", "-n", 
"/etc/dnsmasq.conf"); + + say "\n\n/etc/lighttpd/lighttpd.conf"; + do_or_die("cat", "-n", "/etc/lighttpd/lighttpd.conf"); + + say "\n\n/etc/lighttpd/conf-enabled/15-fastcgi-php.conf"; + do_or_die("cat", "-n", "/etc/lighttpd/conf-enabled/15-fastcgi-php.conf"); + + # check lighttpd configuration + do_or_die("lighttpd", "-t", "-f", "/etc/lighttpd/lighttpd.conf"); + + # check pihole configuration + # TODO: silence STDOUT + do_or_die("sudo", "-u", $dns_user->val(), "/usr/bin/pihole-FTL", "test"); +} + +sub trim ($) { + my ($str) = @_; + $str =~ s/\A\s+|\s+\z//g if (defined $str); + return $str; +} + +# Enforce (non-)required and enumerated value constraints +sub validate ($$$@) { + my $name = shift; + my $reqd = shift; + my $cvar = shift; + my %allow = map { $_ => 1 } @_; + + (!$cvar->exists() and $reqd) and + croak(($cvar->name() // $name)." cannot be empty"); + + ($cvar->exists() and %allow and !exists($allow{$cvar->val()})) and + croak(($cvar->name() // $name)." cannot be '".$cvar->val()."' (expected one of: ".join(", ", @_).")"); +} + +sub validate_ip ($) { + my ($ip) = @_; + return unless $ip->exists(); + + # TODO: Silence STDOUT, STDERR + system("ip", "route", "get", $ip->val()) and + croak(sprintf("%s='%s' is invalid", $ip->name(), $ip->val())); +} + +sub write_conf ($@) { + my $path = shift; + @{$FILES{$path}} = @_; + + say "Updating $path, ".scalar(@_)." lines" if exists $ENV{"PIHOLE_DEBUG"}; +} + +sub write_file ($@) { + my $path = shift; + my $content = join("\n", @_); + $content .= "\n" unless $content =~ /\n\z/; + + say "Writing $path, ".scalar(@_)." 
lines" if exists $ENV{"PIHOLE_DEBUG"}; + + open(my $io, ">", $path) or croak "can't open $path for writing: $!"; + print $io $content; + close $io; + + # Just in case + delete $FILES{$path}; +} + +sub sync_files() { + foreach my $path (sort (keys %FILES)) { + write_file($path, @{$FILES{$path}}); + } +} + +############################################################################### + +sub main { + set_defaults(%ENV); + print_env(%ENV); + + fix_permissions(env("PIHOLE_DNS_USER")); + fix_capabilities(env("PIHOLE_DNS_USER")); + + configure_dns_defaults(); + configure_network(%ENV, env("PIHOLE_IPV4_ADDRESS"), env("PIHOLE_IPV6_ADDRESS")); + + # Update version numbers + do_or_die("pihole", "updatechecker"); + + configure_web_password(env("PIHOLE_WEB_PASSWORD"), env("PIHOLE_WEB_PASSWORD_FILE")); + configure_web_address(env("PIHOLE_IPV4_ADDRESS"), env("PIHOLE_IPV6_ADDRESS"), env("PIHOLE_WEB_PORT")); + configure_web_fastcgi(env("PIHOLE_IPV4_ADDRESS"), env("PIHOLE_WEB_HOSTNAME")); + + configure_dns_interface(env("PIHOLE_LISTEN"), env("PIHOLE_INTERFACE")); + configure_dns_user(env("PIHOLE_DNS_USER")); + configure_dns_hostname(env("PIHOLE_IPV4_ADDRESS"), env("PIHOLE_IPV6_ADDRESS"), env("PIHOLE_WEB_HOSTNAME")); + configure_dns_fqdn(env("PIHOLE_DNS_FQDN_REQUIRED")); + configure_dns_priv(env("PIHOLE_DNS_BOGUS_PRIV")); + configure_dns_dnssec(env("PIHOLE_DNS_DNSSEC")); + configure_dns_forwarding( + env("PIHOLE_DNS_LAN_ENABLE"), + env("PIHOLE_DNS_LAN_UPSTREAM"), + env("PIHOLE_DNS_LAN_NETWORK"), + env("PIHOLE_DNS_LAN_DOMAIN")); + configure_dns_upstream( + env("PIHOLE_DNS_UPSTREAM_1"), + env("PIHOLE_DNS_UPSTREAM_2"), + env("PIHOLE_DNS_UPSTREAM_3"), + env("PIHOLE_DNS_UPSTREAM_4")); + + configure_temperature(env("PIHOLE_TEMPERATURE_UNIT")); + configure_admin_email(env("PIHOLE_ADMIN_EMAIL")); + + configure_dhcp(); + + configure_pihole("QUERY_LOGGING" , 0, env("PIHOLE_DNS_LOG_QUERIES"), "true", "false"); + configure_pihole("INSTALL_WEB_SERVER" , 0, 
env("PIHOLE_WEB_INSTALL_SERVER"),"true", "false"); + configure_pihole("INSTALL_WEB_INTERFACE" , 0, env("PIHOLE_WEB_INSTALL_UI"), "true", "false"); + configure_pihole("LIGHTTPD_ENABLED" , 0, env("PIHOLE_WEB_ENABLED"), "true", "false"); + configure_pihole("WEBUIBOXEDLAYOUT" , 0, env("PIHOLE_WEB_UI"), "boxed", "normal"); + + # https://docs.pi-hole.net/ftldns/configfile/ + configure_ftl("BLOCKINGMODE", 1, env("PIHOLE_DNS_BLOCKING_MODE"), "NULL", "IP-NODATA-AAAA", "IP", "NXDOMAIN", "NODATA"); + configure_ftl("SOCKET_LISTENING", 0, lit("local"), "local", "all"); + configure_ftl("FTLPORT", 0, lit("4711")); + configure_ftl("RESOLVE_IPV6", 0, lit("true"), "true", "false"); + configure_ftl("RESOLVE_IPV4", 0, lit("true"), "true", "false"); + configure_ftl("DBIMPORT", 0, lit("true"), "true", "false"); + configure_ftl("MAXDBDAYS", 0, lit("180")); + configure_ftl("DBINTERVAL", 0, lit("1.0")); + configure_ftl("PRIVACYLEVEL", 0, env("PIHOLE_DNS_PRIVACY_LVL"), "0", "1", "2"); + configure_ftl("CNAMEDEEPINSPECT", 1, env("PIHOLE_DNS_CNAME_INSPECT"), "true", "false"); + configure_ftl("IGNORE_LOCALHOST", 0, env("PIHOLE_DNS_IGNORE_LOCALHOST"), "true", "false"); + + configure_blocklists(); + configure_whitelists(); + + sync_files(); + test_configuration(env("PIHOLE_DNS_USER")); + + # s6 doesn't like it when pihole-FTL is running when s6 services start + `kill -9 \$(pgrep pihole-FTL) || echo pihole-FTL is not already running`; + + # Remove crontab installed by pihole, we have our own + do_or_die("rm", "-f", "/etc/cron.d/pihole"); +} + +############################################################################### + +STDOUT->autoflush(1); +STDERR->autoflush(1); + +main(); diff --git a/root/etc/cron.d/docker-pihole b/root/etc/cron.d/docker-pihole new file mode 100644 index 000000000..f4bca6716 --- /dev/null +++ b/root/etc/cron.d/docker-pihole @@ -0,0 +1,19 @@ +PATH=/opt/pihole:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + +# +# ┌───────────── minute, 0 - 59 +# │ 
┌───────────── hour, 0 - 23 +# │ │ ┌───────────── day of the month, 1 - 31 +# │ │ │ ┌───────────── month, 1 - 12 +# │ │ │ │ ┌───────────── day of the week, 0 - 6 (Sunday to Saturday) +# │ │ │ │ │ ┌───────────── username +# │ │ │ │ │ │ +# │ │ │ │ │ │ +# * * * * * * + +# Download blocklist updates once a week + 45 3 * * 6 root pihole -g >/dev/null +@reboot root pihole -g >/dev/null + +# Flush the log daily so it doesn't get out of control + 15 0 * * * root pihole flush >/dev/null diff --git a/s6/debian-root/etc/fix-attrs.d/01-resolver-resolv b/root/etc/fix-attrs.d/01-resolver-resolv similarity index 100% rename from s6/debian-root/etc/fix-attrs.d/01-resolver-resolv rename to root/etc/fix-attrs.d/01-resolver-resolv diff --git a/s6/debian-root/etc/services.d/cron/finish b/root/etc/services.d/cron/finish similarity index 100% rename from s6/debian-root/etc/services.d/cron/finish rename to root/etc/services.d/cron/finish diff --git a/s6/debian-root/etc/services.d/cron/run b/root/etc/services.d/cron/run similarity index 100% rename from s6/debian-root/etc/services.d/cron/run rename to root/etc/services.d/cron/run diff --git a/s6/debian-root/etc/services.d/lighttpd/finish b/root/etc/services.d/lighttpd/finish similarity index 100% rename from s6/debian-root/etc/services.d/lighttpd/finish rename to root/etc/services.d/lighttpd/finish diff --git a/s6/debian-root/etc/services.d/lighttpd/run b/root/etc/services.d/lighttpd/run similarity index 100% rename from s6/debian-root/etc/services.d/lighttpd/run rename to root/etc/services.d/lighttpd/run diff --git a/root/etc/services.d/pihole-FTL/finish b/root/etc/services.d/pihole-FTL/finish new file mode 100644 index 000000000..60b9ac325 --- /dev/null +++ b/root/etc/services.d/pihole-FTL/finish @@ -0,0 +1,10 @@ +#!/usr/bin/with-contenv bash + +PID=$(pgrep pihole-FTL) + +if [ -n "$PID" ]; then + s6-echo "Stopping pihole-FTL" + kill -9 $PID +else + s6-echo "Stopping pihole-FTL (not running)" +fi diff --git 
a/root/etc/services.d/pihole-FTL/run b/root/etc/services.d/pihole-FTL/run new file mode 100644 index 000000000..34e50a80a --- /dev/null +++ b/root/etc/services.d/pihole-FTL/run @@ -0,0 +1,4 @@ +#!/usr/bin/with-contenv bash + +s6-echo "Starting pihole-FTL (no-daemon) as ${PIHOLE_DNS_USER}" +s6-setuidgid ${PIHOLE_DNS_USER} pihole-FTL no-daemon >/dev/null 2>&1 diff --git a/s6/debian-root/usr/bin/host-ip b/root/usr/bin/host-ip similarity index 100% rename from s6/debian-root/usr/bin/host-ip rename to root/usr/bin/host-ip diff --git a/s6/debian-root/usr/bin/set-contenv b/root/usr/bin/set-contenv similarity index 100% rename from s6/debian-root/usr/bin/set-contenv rename to root/usr/bin/set-contenv diff --git a/s6/service b/root/usr/local/bin/service similarity index 100% rename from s6/service rename to root/usr/local/bin/service diff --git a/s6/debian-root/etc/cont-init.d/20-start.sh b/s6/debian-root/etc/cont-init.d/20-start.sh deleted file mode 100644 index 10d58f76d..000000000 --- a/s6/debian-root/etc/cont-init.d/20-start.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/with-contenv bash -set -e - -bashCmd='bash -e' -if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then - set -x ; - bashCmd='bash -e -x' -fi - -# used to start dnsmasq here for gravity to use...now that conflicts port 53 - -$bashCmd /start.sh -# Gotta go fast, no time for gravity -if [ -n "$PYTEST" ]; then - sed -i 's/^gravity_spinup$/#gravity_spinup # DISABLED FOR PYTEST/g' "$(which gravity.sh)" -fi -gravity.sh - -# Kill dnsmasq because s6 won't like it if it's running when s6 services start -kill -9 $(pgrep pihole-FTL) || true - -pihole -v diff --git a/s6/debian-root/etc/services.d/pihole-FTL/finish b/s6/debian-root/etc/services.d/pihole-FTL/finish deleted file mode 100644 index 81c9bd942..000000000 --- a/s6/debian-root/etc/services.d/pihole-FTL/finish +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/with-contenv bash - -s6-echo "Stopping pihole-FTL" -kill -9 $(pgrep pihole-FTL) diff --git 
a/s6/debian-root/etc/services.d/pihole-FTL/run b/s6/debian-root/etc/services.d/pihole-FTL/run deleted file mode 100644 index 70dbd1ca3..000000000 --- a/s6/debian-root/etc/services.d/pihole-FTL/run +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/with-contenv bash - -s6-echo "Starting pihole-FTL ($FTL_CMD) as ${DNSMASQ_USER}" -s6-setuidgid ${DNSMASQ_USER} pihole-FTL $FTL_CMD >/dev/null 2>&1 - -# Notes on above: -# - DNSMASQ_USER default of root is in Dockerfile & can be overwritten by runtime container env -# - /var/log/pihole*.log has FTL's output that no-daemon would normally print in FG too -# prevent duplicating it in docker logs by sending to dev null diff --git a/s6/timeout b/s6/timeout deleted file mode 100755 index 457f050cc..000000000 --- a/s6/timeout +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# A shim to make busybox timeout take in debian style args -# v1 only need support for this style: `timeout 1 getent hosts github.com` - -# Busybox args: -# Usage: timeout [-t SECS] [-s SIG] PROG ARGS -# Debian args: -# Usage: timeout [OPTION] DURATION COMMAND [ARG]... 
-# or: timeout [OPTION] - -TIMEOUT=/usr/bin/timeout -SECS="${1}" -ARGS="${@:2}" - -$TIMEOUT -t $SECS $ARGS diff --git a/setup.py b/setup.py deleted file mode 100644 index 0e393bc13..000000000 --- a/setup.py +++ /dev/null @@ -1,6 +0,0 @@ -from setuptools import setup - -setup( - setup_requires=['pytest-runner'], - tests_require=['pytest'], -) diff --git a/start.sh b/start.sh deleted file mode 100755 index 16b8c79d9..000000000 --- a/start.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -e -# Dockerfile variables -export TAG -export ServerIP -export ServerIPv6 -export PYTEST -export PHP_ENV_CONFIG -export PHP_ERROR_LOG -export HOSTNAME -export WEBLOGDIR -export DNS1 -export DNS2 -export DNSSEC -export DNS_BOGUS_PRIV -export DNS_FQDN_REQUIRED -export INTERFACE -export DNSMASQ_LISTENING_BEHAVIOUR="$DNSMASQ_LISTENING" -export IPv6 -export WEB_PORT -export CONDITIONAL_FORWARDING -export CONDITIONAL_FORWARDING_IP -export CONDITIONAL_FORWARDING_DOMAIN -export CONDITIONAL_FORWARDING_REVERSE -export TEMPERATUREUNIT -export ADMIN_EMAIL -export WEBUIBOXEDLAYOUT - -export adlistFile='/etc/pihole/adlists.list' - -# The below functions are all contained in bash_functions.sh -. /bash_functions.sh - -# Ensure we have all functions available to update our configurations -. /opt/pihole/webpage.sh - -# PH_TEST prevents the install from actually running (someone should rename that) -PH_TEST=true . $PIHOLE_INSTALL - -echo " ::: Starting docker specific checks & setup for docker pihole/pihole" - -# TODO: -#if [ ! 
-f /.piholeFirstBoot ] ; then -# echo " ::: Not first container startup so not running docker's setup, re-create container to run setup again" -#else -# regular_setup_functions -#fi - -fix_capabilities -load_web_password_secret -generate_password -validate_env || exit 1 -prepare_configs -change_setting "PIHOLE_INTERFACE" "$PIHOLE_INTERFACE" -change_setting "IPV4_ADDRESS" "$IPV4_ADDRESS" -change_setting "QUERY_LOGGING" "$QUERY_LOGGING" -change_setting "INSTALL_WEB_SERVER" "$INSTALL_WEB_SERVER" -change_setting "INSTALL_WEB_INTERFACE" "$INSTALL_WEB_INTERFACE" -change_setting "LIGHTTPD_ENABLED" "$LIGHTTPD_ENABLED" -change_setting "IPV4_ADDRESS" "$ServerIP" -change_setting "IPV6_ADDRESS" "$ServerIPv6" -change_setting "DNS_BOGUS_PRIV" "$DNS_BOGUS_PRIV" -change_setting "DNS_FQDN_REQUIRED" "$DNS_FQDN_REQUIRED" -change_setting "DNSSEC" "$DNSSEC" -change_setting "CONDITIONAL_FORWARDING" "$CONDITIONAL_FORWARDING" -change_setting "CONDITIONAL_FORWARDING_IP" "$CONDITIONAL_FORWARDING_IP" -change_setting "CONDITIONAL_FORWARDING_DOMAIN" "$CONDITIONAL_FORWARDING_DOMAIN" -change_setting "CONDITIONAL_FORWARDING_REVERSE" "$CONDITIONAL_FORWARDING_REVERSE" -setup_web_port "$WEB_PORT" -setup_web_password "$WEBPASSWORD" -setup_temp_unit "$TEMPERATUREUNIT" -setup_ui_layout "$WEBUIBOXEDLAYOUT" -setup_admin_email "$ADMIN_EMAIL" -setup_dnsmasq "$DNS1" "$DNS2" "$INTERFACE" "$DNSMASQ_LISTENING_BEHAVIOUR" -setup_php_env -setup_dnsmasq_hostnames "$ServerIP" "$ServerIPv6" "$HOSTNAME" -setup_ipv4_ipv6 -setup_lighttpd_bind "$ServerIP" -setup_blocklists -test_configs - -[ -f /.piholeFirstBoot ] && rm /.piholeFirstBoot - -echo " ::: Docker start setup complete" diff --git a/test/__init__.py b/test/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/conftest.py b/test/conftest.py deleted file mode 100644 index 61d68fd83..000000000 --- a/test/conftest.py +++ /dev/null @@ -1,220 +0,0 @@ - -import functools -import os -import pytest -import subprocess -import testinfra 
-import types - -local_host = testinfra.get_host('local://') -check_output = local_host.check_output - -DEBIAN_VERSION = os.environ.get('DEBIAN_VERSION', 'stretch') -__version__ = None -dotdot = os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir)) -with open('{}/VERSION'.format(dotdot), 'r') as v: - raw_version = v.read().strip() - __version__ = raw_version.replace('release/', 'release-') - -@pytest.fixture() -def run_and_stream_command_output(): - def run_and_stream_command_output_inner(command, verbose=False): - print("Running", command) - build_env = os.environ.copy() - build_env['PIHOLE_VERSION'] = __version__ - build_result = subprocess.Popen(command.split(), env=build_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - bufsize=1, universal_newlines=True) - if verbose: - while build_result.poll() is None: - for line in build_result.stdout: - print(line, end='') - build_result.wait() - if build_result.returncode != 0: - print(" ::: Error running".format(command)) - print(build_result.stderr) - return run_and_stream_command_output_inner - -@pytest.fixture() -def args_volumes(): - return '-v /dev/null:/etc/pihole/adlists.list' - -@pytest.fixture() -def args_env(): - return '-e ServerIP="127.0.0.1"' - -@pytest.fixture() -def args(args_volumes, args_env): - return "{} {}".format(args_volumes, args_env) - -@pytest.fixture() -def test_args(): - ''' test override fixture to provide arguments seperate from our core args ''' - return '' - -def DockerGeneric(request, _test_args, _args, _image, _cmd, _entrypoint): - #assert 'docker' in check_output('id'), "Are you in the docker group?" 
- # Always appended PYTEST arg to tell pihole we're testing - if 'pihole' in _image and 'PYTEST=1' not in _args: - _args = '{} -e PYTEST=1'.format(_args) - docker_run = 'docker run -d -t {args} {test_args} {entry} {image} {cmd}'\ - .format(args=_args, test_args=_test_args, entry=_entrypoint, image=_image, cmd=_cmd) - # Print a human runable version of the container run command for faster debugging - print(docker_run.replace('-d -t', '--rm -it').replace('tail -f /dev/null', 'bash')) - docker_id = check_output(docker_run) - - def teardown(): - check_output("docker logs {}".format(docker_id)) - check_output("docker rm -f {}".format(docker_id)) - request.addfinalizer(teardown) - - docker_container = testinfra.backend.get_backend("docker://" + docker_id, sudo=False) - docker_container.id = docker_id - - return docker_container - - -@pytest.fixture -def Docker(request, test_args, args, image, cmd, entrypoint): - ''' One-off Docker container run ''' - return DockerGeneric(request, test_args, args, image, cmd, entrypoint) - -@pytest.fixture(scope='module') -def DockerPersist(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint, Dig): - ''' Persistent Docker container for multiple tests, instead of stopping container after one test ''' - ''' Uses DUP'd module scoped fixtures because smaller scoped fixtures won't mix with module scope ''' - persistent_container = DockerGeneric(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint) - ''' attach a dig conatiner for lookups ''' - persistent_container.dig = Dig(persistent_container.id) - return persistent_container - -@pytest.fixture -def entrypoint(): - return '' - -@pytest.fixture(params=['amd64', 'armhf', 'arm64', 'armel']) -def arch(request): - return request.param - -@pytest.fixture() -def version(): - return __version__ - -@pytest.fixture() -def debian_version(): - return DEBIAN_VERSION - -@pytest.fixture() -def tag(version, arch, debian_version): - 
return '{}-{}-{}'.format(version, arch, debian_version) - -@pytest.fixture -def webserver(tag): - ''' TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests? ''' - return 'lighttpd' - -@pytest.fixture() -def image(tag): - image = 'pihole' - return '{}:{}'.format(image, tag) - -@pytest.fixture() -def cmd(): - return 'tail -f /dev/null' - -@pytest.fixture(scope='module') -def persist_arch(): - '''amd64 only, dnsmasq/pihole-FTL(?untested?) will not start under qemu-user-static :(''' - return 'amd64' - -@pytest.fixture(scope='module') -def persist_version(): - return __version__ - -@pytest.fixture(scope='module') -def persist_debian_version(): - return DEBIAN_VERSION - -@pytest.fixture(scope='module') -def persist_args_dns(): - return '--dns 127.0.0.1 --dns 1.1.1.1' - -@pytest.fixture(scope='module') -def persist_args_volumes(): - return '-v /dev/null:/etc/pihole/adlists.list' - -@pytest.fixture(scope='module') -def persist_args_env(): - return '-e ServerIP="127.0.0.1"' - -@pytest.fixture(scope='module') -def persist_args(persist_args_volumes, persist_args_env): - return "{} {}".format(persist_args_volumes, persist_args_env) - -@pytest.fixture(scope='module') -def persist_test_args(): - ''' test override fixture to provide arguments seperate from our core args ''' - return '' - -@pytest.fixture(scope='module') -def persist_tag(persist_version, persist_arch, persist_debian_version): - return '{}_{}_{}'.format(persist_version, persist_arch, persist_debian_version) - -@pytest.fixture(scope='module') -def persist_webserver(persist_tag): - ''' TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests? 
''' - return 'lighttpd' - -@pytest.fixture(scope='module') -def persist_image(persist_tag): - image = 'pihole' - return '{}:{}'.format(image, persist_tag) - -@pytest.fixture(scope='module') -def persist_cmd(): - return 'tail -f /dev/null' - -@pytest.fixture(scope='module') -def persist_entrypoint(): - return '' - -@pytest.fixture -def Slow(): - """ - Run a slow check, check if the state is correct for `timeout` seconds. - """ - import time - def slow(check, timeout=20): - timeout_at = time.time() + timeout - while True: - try: - assert check() - except AssertionError as e: - if time.time() < timeout_at: - time.sleep(1) - else: - raise e - else: - return - return slow - -@pytest.fixture(scope='module') -def Dig(): - ''' separate container to link to pi-hole and perform lookups ''' - ''' a docker pull is faster than running an install of dnsutils ''' - def dig(docker_id): - args = '--link {}:test_pihole'.format(docker_id) - image = 'azukiapp/dig' - cmd = 'tail -f /dev/null' - dig_container = DockerGeneric(request, '', args, image, cmd, '') - return dig_container - return dig - -''' -Persistent Docker container for testing service post start.sh -''' -@pytest.fixture -def RunningPiHole(DockerPersist, Slow, persist_webserver): - ''' Persist a fully started docker-pi-hole to help speed up subsequent tests ''' - Slow(lambda: DockerPersist.run('pgrep pihole-FTL').rc == 0) - Slow(lambda: DockerPersist.run('pgrep lighttpd').rc == 0) - return DockerPersist diff --git a/test/test_bash_functions.py b/test/test_bash_functions.py deleted file mode 100644 index 8773d419e..000000000 --- a/test/test_bash_functions.py +++ /dev/null @@ -1,181 +0,0 @@ - -import os -import pytest -import re - - -@pytest.mark.parametrize('test_args,expected_ipv6,expected_stdout', [ - ('', True, 'IPv4 and IPv6'), - ('-e "IPv6=True"', True, 'IPv4 and IPv6'), - ('-e "IPv6=False"', False, 'IPv4'), - ('-e "IPv6=foobar"', False, 'IPv4'), -]) -def test_IPv6_not_True_removes_ipv6(Docker, Slow, test_args, 
expected_ipv6, expected_stdout): - ''' When a user overrides IPv6=True they only get IPv4 listening webservers ''' - IPV6_LINE = 'use-ipv6.pl' - WEB_CONFIG = '/etc/lighttpd/lighttpd.conf' - - function = Docker.run('. /bash_functions.sh ; setup_ipv4_ipv6') - assert "Using {}".format(expected_stdout) in function.stdout - if expected_stdout == 'IPv4': - assert 'IPv6' not in function.stdout - # On overlay2(?) docker sometimes writes to disk are slow enough to break some tests... - expected_ipv6_check = lambda: (\ - IPV6_LINE in Docker.run('grep \'use-ipv6.pl\' {}'.format(WEB_CONFIG)).stdout - ) == expected_ipv6 - Slow(expected_ipv6_check) - - -@pytest.mark.parametrize('test_args', ['-e "WEB_PORT=999"']) -def test_overrides_default_WEB_PORT(Docker, Slow, test_args): - ''' When a --net=host user sets WEB_PORT to avoid synology's 80 default IPv4 and or IPv6 ports are updated''' - CONFIG_LINE = r'server.port\s*=\s*999' - WEB_CONFIG = '/etc/lighttpd/lighttpd.conf' - - function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_port /start.sh`') - assert "Custom WEB_PORT set to 999" in function.stdout - assert "INFO: Without proper router DNAT forwarding to 127.0.0.1:999, you may not get any blocked websites on ads" in function.stdout - Slow(lambda: re.search(CONFIG_LINE, Docker.run('cat {}'.format(WEB_CONFIG)).stdout) != None) - - -@pytest.mark.parametrize('test_args,expected_error', [ - ('-e WEB_PORT="LXXX"', 'WARNING: Custom WEB_PORT not used - LXXX is not an integer'), - ('-e WEB_PORT="1,000"', 'WARNING: Custom WEB_PORT not used - 1,000 is not an integer'), - ('-e WEB_PORT="99999"', 'WARNING: Custom WEB_PORT not used - 99999 is not within valid port range of 1-65535'), -]) -def test_bad_input_to_WEB_PORT(Docker, test_args, expected_error): - function = Docker.run('. 
/bash_functions.sh ; eval `grep setup_web_port /start.sh`') - assert expected_error in function.stdout - - -# DNS Environment Variable behavior in combinations of modified pihole LTE settings -@pytest.mark.skip('broke, needs investigation in v5.0 beta') -@pytest.mark.parametrize('args_env, expected_stdout, dns1, dns2', [ - ('', 'default DNS', '8.8.8.8', '8.8.4.4' ), - ('-e DNS1="1.2.3.4"', 'custom DNS', '1.2.3.4', '8.8.4.4' ), - ('-e DNS2="1.2.3.4"', 'custom DNS', '8.8.8.8', '1.2.3.4' ), - ('-e DNS1="1.2.3.4" -e DNS2="2.2.3.4"', 'custom DNS', '1.2.3.4', '2.2.3.4' ), - ('-e DNS1="1.2.3.4" -e DNS2="no"', 'custom DNS', '1.2.3.4', None ), - ('-e DNS2="no"', 'custom DNS', '8.8.8.8', None ), -]) -def test_override_default_servers_with_DNS_EnvVars(Docker, Slow, args_env, expected_stdout, dns1, dns2): - ''' on first boot when DNS vars are NOT set explain default google DNS settings are used - or when DNS vars are set override the pihole DNS settings ''' - assert Docker.run('test -f /.piholeFirstBoot').rc == 0 - function = Docker.run('. 
/bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`') - assert expected_stdout in function.stdout - expected_servers = 'server={}\n'.format(dns1) if dns2 == None else 'server={}\nserver={}\n'.format(dns1, dns2) - Slow(lambda: expected_servers == Docker.run('grep "^server=[^/]" /etc/dnsmasq.d/01-pihole.conf').stdout) - - -#@pytest.mark.skipif(os.environ.get('CI') == 'true', -# reason="Can't get setupVar setup to work on travis") -@pytest.mark.skip('broke, needs investigation in v5.0 beta') -@pytest.mark.parametrize('args_env, dns1, dns2, expected_stdout', [ - - ('', '9.9.9.1', '9.9.9.2', - 'Existing DNS servers used'), - ('-e DNS1="1.2.3.4"', '9.9.9.1', '9.9.9.2', - 'Docker DNS variables not used\nExisting DNS servers used (9.9.9.1 & 9.9.9.2)'), - ('-e DNS2="1.2.3.4"', '8.8.8.8', None, - 'Docker DNS variables not used\nExisting DNS servers used (8.8.8.8 & unset)'), - ('-e DNS1="1.2.3.4" -e DNS2="2.2.3.4"', '1.2.3.4', '2.2.3.4', - 'Docker DNS variables not used\nExisting DNS servers used (1.2.3.4 & 2.2.3.4'), -]) -def test_DNS_Envs_are_secondary_to_setupvars(Docker, Slow, args_env, expected_stdout, dns1, dns2): - ''' on second boot when DNS vars are set just use pihole DNS settings - or when DNS vars and FORCE_DNS var are set override the pihole DNS settings ''' - # Given we are not booting for the first time - assert Docker.run('rm /.piholeFirstBoot').rc == 0 - - # and a user already has custom pihole dns variables in setup vars - dns_count = 1 - setupVars = '/etc/pihole/setupVars.conf' - Docker.run('sed -i "/^PIHOLE_DNS/ d" {}'.format(setupVars)) - Docker.run('echo "PIHOLE_DNS_1={}" | tee -a {}'.format(dns1, setupVars)) - if dns2: - Docker.run('echo "PIHOLE_DNS_2={}" | tee -a {}'.format(dns2, setupVars)) - Docker.run('sync {}'.format(setupVars)) - Slow(lambda: 'PIHOLE_DNS' in Docker.run('cat {}'.format(setupVars)).stdout) - - # When we run setup dnsmasq during startup of the container - function = Docker.run('. 
/bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`') - assert expected_stdout in function.stdout - - # Then the servers are still what the user had customized if forced dnsmasq is not set - expected_servers = ['server={}'.format(dns1)] - if dns2: - expected_servers.append('server={}'.format(dns2)) - Slow(lambda: Docker.run('grep "^server=[^/]" /etc/dnsmasq.d/01-pihole.conf').stdout.strip().split('\n') == \ - expected_servers) - - -@pytest.mark.parametrize('args_env, expected_stdout, expected_config_line', [ - ('', 'binding to default interface: eth0', 'interface=eth0' ), - ('-e INTERFACE="eth0"', 'binding to default interface: eth0', 'interface=eth0' ), - ('-e INTERFACE="br0"', 'binding to custom interface: br0', 'interface=br0'), -]) -def test_DNS_interface_override_defaults(Docker, Slow, args_env, expected_stdout, expected_config_line): - ''' When INTERFACE environment var is passed in, overwrite dnsmasq interface ''' - function = Docker.run('. /bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`') - assert expected_stdout in function.stdout - Slow(lambda: expected_config_line + '\n' == Docker.run('grep "^interface" /etc/dnsmasq.d/01-pihole.conf').stdout) - - -expected_debian_lines = [ - '"VIRTUAL_HOST" => "127.0.0.1"', - '"ServerIP" => "127.0.0.1"', - '"PHP_ERROR_LOG" => "/var/log/lighttpd/error.log"' -] - - -@pytest.mark.parametrize('expected_lines,repeat_function', [ - (expected_debian_lines, 1), - (expected_debian_lines, 2) -]) -def test_debian_setup_php_env(Docker, expected_lines, repeat_function): - ''' confirm all expected output is there and nothing else ''' - stdout = '' - for i in range(repeat_function): - stdout = Docker.run('. 
/bash_functions.sh ; eval `grep setup_php_env /start.sh`').stdout - for expected_line in expected_lines: - search_config_cmd = "grep -c '{}' /etc/lighttpd/conf-enabled/15-fastcgi-php.conf".format(expected_line) - search_config_count = Docker.run(search_config_cmd) - found_lines = int(search_config_count.stdout.rstrip('\n')) - if found_lines > 1: - assert False, "Found line {} times (more than once): {}".format(expected_line) - - -def test_webPassword_random_generation(Docker): - ''' When a user sets webPassword env the admin password gets set to that ''' - function = Docker.run('. /bash_functions.sh ; eval `grep generate_password /start.sh`') - assert 'assigning random password' in function.stdout.lower() - - -@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')]) -@pytest.mark.parametrize('args_env,secure,setupVarsHash', [ - ('-e ServerIP=1.2.3.4 -e WEBPASSWORD=login', True, 'WEBPASSWORD=6060d59351e8c2f48140f01b2c3f3b61652f396c53a5300ae239ebfbe7d5ff08'), - ('-e ServerIP=1.2.3.4 -e WEBPASSWORD=""', False, ''), -]) -def test_webPassword_env_assigns_password_to_file_or_removes_if_empty(Docker, args_env, secure, setupVarsHash): - ''' When a user sets webPassword env the admin password gets set or removed if empty ''' - function = Docker.run('. 
/bash_functions.sh ; eval `grep setup_web_password /start.sh`') - - if secure: - assert 'new password set' in function.stdout.lower() - assert Docker.run('grep -q \'{}\' {}'.format(setupVarsHash, '/etc/pihole/setupVars.conf')).rc == 0 - else: - assert 'password removed' in function.stdout.lower() - assert Docker.run('grep -q \'^WEBPASSWORD=$\' /etc/pihole/setupVars.conf').rc == 0 - - -@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')]) -@pytest.mark.parametrize('test_args', ['-e WEBPASSWORD=login', '-e WEBPASSWORD=""']) -def test_webPassword_pre_existing_trumps_all_envs(Docker, args_env, test_args): - '''When a user setup webPassword in the volume prior to first container boot, - during prior container boot, the prior volume password is left intact / setup skipped''' - Docker.run('. /opt/pihole/webpage.sh ; add_setting WEBPASSWORD volumepass') - function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_password /start.sh`') - - assert '::: Pre existing WEBPASSWORD found' in function.stdout - assert Docker.run('grep -q \'{}\' {}'.format('WEBPASSWORD=volumepass', '/etc/pihole/setupVars.conf')).rc == 0 diff --git a/test/test_pihole_scripts.py b/test/test_pihole_scripts.py deleted file mode 100644 index b7fee895f..000000000 --- a/test/test_pihole_scripts.py +++ /dev/null @@ -1,54 +0,0 @@ -import pytest - - -@pytest.fixture(scope='module') -def start_cmd(): - ''' broken by default, required override ''' - return None - - -@pytest.fixture -def RunningPiHole(DockerPersist, Slow, persist_webserver, persist_tag, start_cmd): - ''' Override the RunningPiHole to run and check for success of a - pihole-FTL start based `pihole` script command - - Individual tests all must override start_cmd''' - #print DockerPersist.run('ps -ef').stdout - assert DockerPersist.dig.run('ping -c 1 test_pihole').rc == 0 - Slow(lambda: DockerPersist.run('pgrep pihole-FTL').rc == 0) - Slow(lambda: DockerPersist.run('pgrep {}'.format(persist_webserver)).rc == 0) 
- oldpid = DockerPersist.run('pidof pihole-FTL') - cmd = DockerPersist.run('pihole {}'.format(start_cmd)) - Slow(lambda: DockerPersist.run('pgrep pihole-FTL').rc == 0) - newpid = DockerPersist.run('pidof pihole-FTL') - for pid in [oldpid, newpid]: - assert pid != '' - # ensure a new pid for pihole-FTL appeared due to service restart - assert oldpid != newpid - assert cmd.rc == 0 - # Save out cmd result to check different stdout of start/enable/disable - DockerPersist.cmd = cmd - return DockerPersist - - -@pytest.mark.parametrize('start_cmd,hostname,expected_ip, expected_messages', [ - ('enable', 'pi.hole', '127.0.0.1', ['Blocking already enabled,','nothing to do']), - ('disable', 'pi.hole', '127.0.0.1', ['Disabling blocking','Pi-hole Disabled']), -]) -def test_pihole_enable_disable_command(RunningPiHole, Dig, persist_tag, start_cmd, hostname, expected_ip, expected_messages): - ''' the start_cmd tests are all built into the RunningPiHole fixture in this file ''' - dig_cmd = "dig +time=1 +noall +answer {} @test_pihole".format(hostname) - lookup = RunningPiHole.dig.run(dig_cmd) - assert lookup.rc == 0 - lookup_ip = lookup.stdout.split()[4] - assert lookup_ip == expected_ip - - for part_of_output in expected_messages: - assert part_of_output in RunningPiHole.cmd.stdout - -@pytest.mark.parametrize('start_cmd,expected_message', [ - ('-up', 'Function not supported in Docker images') -]) -def test_pihole_update_command(RunningPiHole, start_cmd, expected_message): - assert RunningPiHole.cmd.stdout.strip() == expected_message - diff --git a/test/test_start.py b/test/test_start.py deleted file mode 100644 index 2f44e0837..000000000 --- a/test/test_start.py +++ /dev/null @@ -1,69 +0,0 @@ - -import pytest -import time -''' conftest.py provides the defaults through fixtures ''' -''' Note, testinfra builtins don't seem fully compatible with - docker containers (esp. musl based OSs) stripped down nature ''' - -# If the test runs /start.sh, do not let s6 run it too! 
Kill entrypoint to avoid race condition/duplicated execution -@pytest.mark.parametrize('persist_entrypoint,persist_cmd,persist_args_env', [('--entrypoint=tail','-f /dev/null','')]) -def test_ServerIP_missing_is_not_required_anymore(RunningPiHole): - ''' When args to docker are empty start.sh exits saying ServerIP is required ''' - start = Docker.run('/start.sh') - error_msg = "ERROR: To function correctly you must pass an environment variables of 'ServerIP' into the docker container" - assert start.rc == 1 - assert error_msg in start.stdout - -# If the test runs /start.sh, do not let s6 run it too! Kill entrypoint to avoid race condition/duplicated execution -@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')]) -@pytest.mark.parametrize('args,error_msg,expect_rc', [ - ('-e ServerIP="1.2.3.z"', "ServerIP Environment variable (1.2.3.z) doesn't appear to be a valid IPv4 address",1), - ('-e ServerIP="1.2.3.4" -e ServerIPv6="1234:1234:1234:ZZZZ"', "Environment variable (1234:1234:1234:ZZZZ) doesn't appear to be a valid IPv6 address",1), - ('-e ServerIP="1.2.3.4" -e ServerIPv6="kernel"', "ERROR: You passed in IPv6 with a value of 'kernel'",1), -]) -def test_ServerIP_invalid_IPs_triggers_exit_error(Docker, error_msg, expect_rc): - ''' When args to docker are empty start.sh exits saying ServerIP is required ''' - start = Docker.run('/start.sh') - assert start.rc == expect_rc - assert 'ERROR' in start.stdout - assert error_msg in start.stdout - -@pytest.mark.parametrize('hostname,expected_ip', [ - ('pi.hole', '127.0.0.1'), - ('google-public-dns-a.google.com', '8.8.8.8'), - ('b.resolvers.Level3.net', '4.2.2.2') -]) -def test_dns_responses(RunningPiHole, hostname, expected_ip): - dig_cmd = "dig +time=1 +noall +answer {} @test_pihole | awk '{{ print $5 }}'".format(hostname) - lookup = RunningPiHole.dig.run(dig_cmd).stdout.rstrip('\n') - assert lookup == expected_ip - -def test_indecies_are_present(RunningPiHole): - File = 
RunningPiHole.get_module('File') - File('/var/www/html/pihole/index.html').exists - File('/var/www/html/pihole/index.js').exists - -def validate_curl(http_rc, expected_http_code, page_contents): - if int(http_rc.rc) != 0 or int(http_rc.stdout) != expected_http_code: - print('CURL return code: {}'.format(http_rc.rc)) - print('CURL stdout: {}'.format(http_rc.stdout)) - print('CURL stderr:{}'.format(http_rc.stderr)) - print('CURL file:\n{}\n'.format(page_contents.encode('utf-8'))) - - -@pytest.mark.parametrize('addr', [ 'localhost' ] ) -@pytest.mark.parametrize('url', [ '/admin/', '/admin/index.php' ] ) -def test_admin_requests_load_as_expected(RunningPiHole, version, addr, url): - command = 'curl -L -s -o /tmp/curled_file -w "%{{http_code}}" http://{}{}'.format(addr, url) - http_rc = RunningPiHole.run(command) - page_contents = RunningPiHole.run('cat /tmp/curled_file ').stdout - expected_http_code = 200 - - validate_curl(http_rc, expected_http_code, page_contents) - assert http_rc.rc == 0 - assert int(http_rc.stdout) == expected_http_code - for html_text in ['dns_queries_today', 'Content-Security-Policy', - 'scripts/pi-hole/js/footer.js']: - # version removed, not showing up in footer of test env (fix me) - assert html_text in page_contents - diff --git a/test/test_volume_data.sh b/test/test_volume_data.sh deleted file mode 100755 index 0acf9b202..000000000 --- a/test/test_volume_data.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/bin/bash -set -ex -# Trying something different from the python test, this is a big integration test in bash -# Tests multiple volume settings and how they are impacted by the complete startup scripts + restart/re-creation of container -# Maybe a bit easier to read the workflow/debug in bash than python for others? 
-# This workflow is VERY similar to python's tests, but in bash so not object-oriented/pytest fixture based - -# Debug can be added anywhere to check current state mid-test -RED='\033[0;31m' -NC='\033[0m' # No Color -if [ $(id -u) != 0 ] ; then - sudo=sudo # do not need if root (in docker) -fi -debug() { - $sudo grep -r . "$VOL_PH" - $sudo grep -r . "$VOL_DM" -} -# Cleanup at the end, print debug on fail -cleanup() { - retcode=$? - { set +x; } 2>/dev/null - if [ $retcode != 0 ] ; then - printf "${RED}ERROR / FAILURE${NC} - printing all volume info" - debug - fi - docker rm -f $CONTAINER - $sudo rm -rf $VOLUMES - exit $retcode -} -trap "cleanup" INT TERM EXIT - - -# VOLUME TESTS - -# Given... -DEBIAN_VERSION="$(DEBIAN_VERSION:-stretch)" -IMAGE="${1:-pihole:v5.0-amd64}-${DEBIAN_VERSION}" # Default is latest build test image (generic, non release/branch tag) -VOLUMES="$(mktemp -d)" # A fresh volume directory -VOL_PH="$VOLUMES/pihole" -VOL_DM="$VOLUMES/dnsmasq.d" -tty -s && TTY='-t' || TTY='' - -echo "Testing $IMAGE with volumes base path $VOLUMES" - -# When -# Running stock+empty volumes (no ports to avoid conflicts) -CONTAINER="$( - docker run -d \ - -v "$VOL_PH:/etc/pihole/" \ - -v "$VOL_DM:/etc/dnsmasq.d/" \ - -v "/dev/null:/etc/pihole/adlists.list" \ - --entrypoint='' \ - $IMAGE \ - tail -f /dev/null -)" # container backgrounded for multipiple operations over time - -EXEC() { - local container="$1" - # Must quote for complex commands - docker exec $TTY $container bash -c "$2" -} -EXEC $CONTAINER /start.sh # run all the startup scripts - -# Then default are present -grep "PIHOLE_DNS_1=8.8.8.8" "$VOL_PH/setupVars.conf" -grep "PIHOLE_DNS_2=8.8.4.4" "$VOL_PH/setupVars.conf" -grep "IPV4_ADDRESS=0.0.0.0" "$VOL_PH/setupVars.conf" -grep -E "WEBPASSWORD=.+" "$VOL_PH/setupVars.conf" - -# Given the settings are manually changed (not good settings, just for testing changes) -EXEC $CONTAINER 'pihole -a setdns 127.1.1.1,127.2.2.2,127.3.3.3,127.4.4.4' -EXEC $CONTAINER '. 
/opt/pihole/webpage.sh ; change_setting IPV4_ADDRESS 10.0.0.0' -EXEC $CONTAINER 'pihole -a -p login' -assert_new_settings() { - grep "PIHOLE_DNS_1=127.1.1.1" "$VOL_PH/setupVars.conf" - grep "PIHOLE_DNS_2=127.2.2.2" "$VOL_PH/setupVars.conf" - grep "PIHOLE_DNS_3=127.3.3.3" "$VOL_PH/setupVars.conf" - grep "PIHOLE_DNS_4=127.4.4.4" "$VOL_PH/setupVars.conf" - grep "IPV4_ADDRESS=10.0.0.0" "$VOL_PH/setupVars.conf" - grep "WEBPASSWORD=6060d59351e8c2f48140f01b2c3f3b61652f396c53a5300ae239ebfbe7d5ff08" "$VOL_PH/setupVars.conf" - grep "server=127.1.1.1" $VOL_DM/01-pihole.conf - grep "server=127.2.2.2" $VOL_DM/01-pihole.conf -} -assert_new_settings - -# When Restarting -docker restart $CONTAINER -# Then settings are still manual changed values -assert_new_settings - -# When removing/re-creating the container -docker rm -f $CONTAINER -CONTAINER="$( - docker run -d \ - -v "$VOL_PH:/etc/pihole/" \ - -v "$VOL_DM:/etc/dnsmasq.d/" \ - -v "/dev/null:/etc/pihole/adlists.list" \ - --entrypoint='' \ - $IMAGE \ - tail -f /dev/null -)" # container backgrounded for multipiple operations over time - -# Then settings are still manual changed values -assert_new_settings diff --git a/test/test_volumes.py b/test/test_volumes.py deleted file mode 100644 index 1207f27e1..000000000 --- a/test/test_volumes.py +++ /dev/null @@ -1,4 +0,0 @@ -def test_volume_shell_script(arch, run_and_stream_command_output): - # only one arch should be necessary - if arch == 'amd64': - run_and_stream_command_output('./test/test_volume_data.sh') diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 9342d6bc5..000000000 --- a/tox.ini +++ /dev/null @@ -1,18 +0,0 @@ -[tox] -envlist = py38 - -[testenv] -commands = echo "Use ./gh-actions-test.sh instead for now" - -# Currently out of comission post-python3 upgrade due to failed monkey patch of testinfra sh -> bash -#[testenv] -#whitelist_externals = docker -#deps = -rrequirements.txt -## 2 parallel max b/c race condition with docker fixture (I think?) 
-#commands = docker run --rm --privileged multiarch/qemu-user-static:register --reset -# ./Dockerfile.py -v --arch amd64 -# pytest -vv -n auto -k amd64 ./test/ -# ./Dockerfile.py -v --arch armhf --arch arm64 --arch armel -# pytest -vv -n auto -k arm64 ./test/ -# pytest -vv -n auto -k armhf ./test/ -# pytest -vv -n auto -k armel ./test/