diff --git a/README.md b/README.md index 91988939..d9f724d3 100644 --- a/README.md +++ b/README.md @@ -449,11 +449,15 @@ Each caddy docker proxy instance can be executed in one of the following modes. Acts as a proxy to your Docker resources. The server starts without any configuration, and will not serve anything until it is configured by a "controller". -In order to make a server discoverable and configurable by controllers, you need to mark it with label `caddy_controlled_server` and define the controller network via CLI option `controller-network` or environment variable `CADDY_CONTROLLER_NETWORK`. +In order to make a server discoverable and configurable by controllers, you need to mark it with label `caddy_controlled_server`. Server instances doesn't need access to Docker host socket and you can run it in manager or worker nodes. -[Configuration example](examples/distributed.yaml#L5) +[Configuration example](examples/distributed-single-network.yaml#L5) + +When using a separate controller network, you must also configure the controller url via CLI option `controller-url` or environment variable `CADDY_CONTROLLER_URL`, that allows caddy server to fetch the controller networks configured in the controller. + +[Configuration example](examples/distributed-controller-network.yaml#L5) ### Controller @@ -482,29 +486,42 @@ Run `caddy help docker-proxy` to see all available flags. ``` Usage of docker-proxy: --caddyfile-path string - Path to a base Caddyfile that will be extended with Docker sites + Path to a base Caddyfile that will be extended with Docker sites. + Applicable to modes: controller, standalone --controller-network string - Network allowed to configure Caddy server in CIDR notation. Ex: 10.200.200.0/24 + Controller network name. Ex: caddy_controller. + Applicable to modes: controller + --controller-url string + Controller url, used by servers to fetch controller subnets. 
Ex: http://caddy-controller + Applicable to modes: server --ingress-networks string Comma separated name of ingress networks connecting Caddy servers to containers. When not defined, networks attached to controller container are considered ingress networks + Applicable to modes: controller, standalone --docker-sockets Comma separated docker sockets When not defined, DOCKER_HOST (or default docker socket if DOCKER_HOST not defined) + Applicable to modes: controller, standalone --docker-certs-path Comma separated cert path, you could use empty value when no cert path for the concern index docker socket like cert_path0,,cert_path2 + Applicable to modes: controller, standalone --docker-apis-version Comma separated apis version, you could use empty value when no api version for the concern index docker socket like cert_path0,,cert_path2 + Applicable to modes: controller, standalone --label-prefix string Prefix for Docker labels (default "caddy") + Applicable to modes: controller, standalone --mode Which mode this instance should run: standalone | controller | server --polling-interval duration Interval Caddy should manually check Docker for a new Caddyfile (default 30s) + Applicable to modes: controller, standalone --process-caddyfile Process Caddyfile before loading it, removing invalid servers (default true) + Applicable to modes: controller, standalone --proxy-service-tasks Proxy to service tasks instead of service load balancer (default true) + Applicable to modes: controller, standalone ``` Those flags can also be set via environment variables: diff --git a/cmd.go b/cmd.go index 65d99e7f..ebcaa50e 100644 --- a/cmd.go +++ b/cmd.go @@ -1,8 +1,12 @@ package caddydockerproxy import ( + "encoding/json" "flag" + "io/ioutil" "net" + "net/http" + "net/url" "os" "regexp" "strings" @@ -31,35 +35,50 @@ func init() { "Which mode this instance should run: standalone | controller | server") fs.String("docker-sockets", "", - "Docker sockets comma separate") + "Docker sockets 
comma separate.\n"+ + "Applicable to modes: controller, standalone") fs.String("docker-certs-path", "", - "Docker socket certs path comma separate") + "Docker socket certs path comma separate.\n"+ + "Applicable to modes: controller, standalone") fs.String("docker-apis-version", "", - "Docker socket apis version comma separate") + "Docker socket apis version comma separate.\n"+ + "Applicable to modes: controller, standalone") fs.String("controller-network", "", - "Network allowed to configure caddy server in CIDR notation. Ex: 10.200.200.0/24") + "Controller network name. Ex: caddy_controller.\n"+ + "When not defined, all networks attached to controller container are considered controller networks\n"+ + "Applicable to modes: controller, standalone") + + fs.String("controller-url", "", + "Controller url, used by servers to fetch controller subnets. Ex: http://caddy-controller\n"+ + "Applicable to modes: server") fs.String("ingress-networks", "", "Comma separated name of ingress networks connecting caddy servers to containers.\n"+ - "When not defined, networks attached to controller container are considered ingress networks") + "When not defined, all networks attached to controller container are considered ingress networks\n"+ + "Applicable to modes: controller, standalone") fs.String("caddyfile-path", "", - "Path to a base Caddyfile that will be extended with docker sites") + "Path to a base Caddyfile that will be extended with docker sites.\n"+ + "Applicable to modes: controller, standalone") fs.String("label-prefix", generator.DefaultLabelPrefix, - "Prefix for Docker labels") + "Prefix for Docker labels.\n"+ + "Applicable to modes: controller, standalone") fs.Bool("proxy-service-tasks", true, - "Proxy to service tasks instead of service load balancer") + "Proxy to service tasks instead of service load balancer.\n"+ + "Applicable to modes: controller, standalone") fs.Bool("process-caddyfile", true, - "Process Caddyfile before loading it, removing invalid servers") + 
"Process Caddyfile before loading it, removing invalid servers.\n"+ + "Applicable to modes: controller, standalone") fs.Duration("polling-interval", 30*time.Second, - "Interval caddy should manually check docker for a new caddyfile") + "Interval caddy should manually check docker for a new caddyfile.\n"+ + "Applicable to modes: controller, standalone") return fs }(), @@ -75,9 +94,14 @@ func cmdFunc(flags caddycmd.Flags) (int, error) { if options.Mode&config.Server == config.Server { log.Info("Running caddy proxy server") - err := caddy.Run(&caddy.Config{ + bindAddress, err := getAdminListen(options) + if err != nil { + return 1, err + } + + err = caddy.Run(&caddy.Config{ Admin: &caddy.AdminConfig{ - Listen: getAdminListen(options), + Listen: bindAddress, }, }) if err != nil { @@ -87,8 +111,8 @@ func cmdFunc(flags caddycmd.Flags) (int, error) { if options.Mode&config.Controller == config.Controller { log.Info("Running caddy proxy controller") - loader := CreateDockerLoader(options) - if err := loader.Start(); err != nil { + controller := CreateCaddyController(options) + if err := controller.Start(); err != nil { if err := caddy.Stop(); err != nil { return 1, err } @@ -100,37 +124,72 @@ func cmdFunc(flags caddycmd.Flags) (int, error) { select {} } -func getAdminListen(options *config.Options) string { - if options.ControllerNetwork != nil { - ifaces, err := net.Interfaces() - log := logger() +func getAdminListen(options *config.Options) (string, error) { + if options.Mode&config.Controller == config.Controller { + return "tcp/localhost:2019", nil + } + + log := logger() + + var controllerNetworks []string + + if options.ControllerNetwork != "" { + controllerNetworks = append(controllerNetworks, options.ControllerNetwork) + } + if options.ControllerUrl != nil { + url := strings.TrimRight(options.ControllerUrl.String(), "/") + "/controller-subnets" + log.Info("Fetching controller networks from url", zap.String("url", url)) + resp, err := http.Get(url) if err != nil { - 
log.Error("Failed to get network interfaces", zap.Error(err)) + log.Error("Failed to fetch controller networks from controller", zap.String("url", url), zap.Error(err)) + return "", err } - for _, i := range ifaces { - addrs, err := i.Addrs() - if err != nil { - log.Error("Failed to get network interface addresses", zap.Error(err)) - continue - } - for _, a := range addrs { - switch v := a.(type) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + json.Unmarshal(body, &controllerNetworks) + } + + var ipNets []*net.IPNet + for _, controllerNetwork := range controllerNetworks { + _, ipNet, err := net.ParseCIDR(controllerNetwork) + if err != nil { + log.Error("Failed to parse controller network", zap.String("ControllerNetwork", controllerNetwork), zap.Error(err)) + return "", err + } + ipNets = append(ipNets, ipNet) + } + + ifaces, err := net.Interfaces() + if err != nil { + log.Error("Failed to get network interfaces", zap.Error(err)) + return "", err + } + for _, iface := range ifaces { + addrs, err := iface.Addrs() + if err != nil { + log.Error("Failed to get network interface addresses", zap.Error(err)) + return "", err + } + for _, addr := range addrs { + for _, ipNet := range ipNets { + switch v := addr.(type) { case *net.IPAddr: - if options.ControllerNetwork.Contains(v.IP) { - return "tcp/" + v.IP.String() + ":2019" + if ipNet.Contains(v.IP) { + return "tcp/" + v.IP.String() + ":2019", nil } - break case *net.IPNet: - if options.ControllerNetwork.Contains(v.IP) { - return "tcp/" + v.IP.String() + ":2019" + if ipNet.Contains(v.IP) { + return "tcp/" + v.IP.String() + ":2019", nil } - break } } } } - return "tcp/localhost:2019" + + return "tcp/0.0.0.0:2019", nil } func createOptions(flags caddycmd.Flags) *config.Options { @@ -140,7 +199,8 @@ func createOptions(flags caddycmd.Flags) *config.Options { caddyfilePathFlag := flags.String("caddyfile-path") labelPrefixFlag := flags.String("label-prefix") proxyServiceTasksFlag := flags.Bool("proxy-service-tasks") processCaddyfileFlag := flags.Bool("process-caddyfile") pollingIntervalFlag := flags.Duration("polling-interval") modeFlag := 
flags.String("mode") - controllerSubnetFlag := flags.String("controller-network") + controllerNetwork := flags.String("controller-network") + controllerUrl := flags.String("controller-url") dockerSocketsFlag := flags.String("docker-sockets") dockerCertsPathFlag := flags.String("docker-certs-path") dockerAPIsVersionFlag := flags.String("docker-apis-version") @@ -186,19 +246,23 @@ func createOptions(flags caddycmd.Flags) *config.Options { options.DockerAPIsVersion = strings.Split(dockerAPIsVersionFlag, ",") } - if controllerIPRangeEnv := os.Getenv("CADDY_CONTROLLER_NETWORK"); controllerIPRangeEnv != "" { - _, ipNet, err := net.ParseCIDR(controllerIPRangeEnv) - if err != nil { - log.Error("Failed to parse CADDY_CONTROLLER_NETWORK", zap.String("CADDY_CONTROLLER_NETWORK", controllerIPRangeEnv), zap.Error(err)) - } else if ipNet != nil { - options.ControllerNetwork = ipNet + if controllerNetworkEnv := os.Getenv("CADDY_CONTROLLER_NETWORK"); controllerNetworkEnv != "" { + options.ControllerNetwork = controllerNetworkEnv + } else { + options.ControllerNetwork = controllerNetwork + } + + if controllerUrlEnv := os.Getenv("CADDY_CONTROLLER_URL"); controllerUrlEnv != "" { + if url, err := url.Parse(controllerUrlEnv); err != nil { + log.Error("Failed to parse CADDY_CONTROLLER_URL", zap.String("value", controllerUrlEnv), zap.Error(err)) + } else { + options.ControllerUrl = url } - } else if controllerSubnetFlag != "" { - _, ipNet, err := net.ParseCIDR(controllerSubnetFlag) - if err != nil { - log.Error("Failed to parse controller-network", zap.String("controller-network", controllerSubnetFlag), zap.Error(err)) - } else if ipNet != nil { - options.ControllerNetwork = ipNet + } else if controllerUrl != "" { + if url, err := url.Parse(controllerUrl); err != nil { + log.Error("Failed to parse controller-url", zap.String("value", controllerUrl), zap.Error(err)) + } else { + options.ControllerUrl = url } } diff --git a/config/options.go b/config/options.go index fc271928..166d00ec 
100644 --- a/config/options.go +++ b/config/options.go @@ -1,7 +1,7 @@ package config import ( - "net" + "net/url" "time" ) @@ -9,8 +9,8 @@ import ( type Options struct { CaddyfilePath string DockerSockets []string - DockerCertsPath []string - DockerAPIsVersion []string + DockerCertsPath []string + DockerAPIsVersion []string LabelPrefix string ControlledServersLabel string ProxyServiceTasks bool @@ -18,7 +18,8 @@ type Options struct { PollingInterval time.Duration Mode Mode Secret string - ControllerNetwork *net.IPNet + ControllerNetwork string + ControllerUrl *url.URL IngressNetworks []string } diff --git a/loader.go b/controller.go similarity index 65% rename from loader.go rename to controller.go index 5f91b7af..2737057a 100644 --- a/loader.go +++ b/controller.go @@ -28,8 +28,8 @@ import ( var CaddyfileAutosavePath = filepath.Join(caddy.AppConfigDir(), "Caddyfile.autosave") -// DockerLoader generates caddy files from docker swarm information -type DockerLoader struct { +// CaddyController generates caddy files from docker swarm information and send to caddy servers +type CaddyController struct { options *config.Options initialized bool dockerClients []docker.Client @@ -43,9 +43,9 @@ type DockerLoader struct { serversUpdating *utils.StringBoolCMap } -// CreateDockerLoader creates a docker loader -func CreateDockerLoader(options *config.Options) *DockerLoader { - return &DockerLoader{ +// CreateCaddyController creates a caddy controller +func CreateCaddyController(options *config.Options) *CaddyController { + return &CaddyController{ options: options, serversVersions: utils.NewStringInt64CMap(), serversUpdating: utils.NewStringBoolCMap(), @@ -57,33 +57,33 @@ func logger() *zap.Logger { Named("docker-proxy") } -// Start docker loader -func (dockerLoader *DockerLoader) Start() error { - if !dockerLoader.initialized { - dockerLoader.initialized = true +// Start controller +func (controller *CaddyController) Start() error { + if !controller.initialized { + 
controller.initialized = true log := logger() dockerClients := []docker.Client{} - for i, dockerSocket := range dockerLoader.options.DockerSockets { + for i, dockerSocket := range controller.options.DockerSockets { // cf https://github.com/docker/go-docker/blob/master/client.go // setenv to use NewEnvClient // or manually os.Setenv("DOCKER_HOST", dockerSocket) - if len(dockerLoader.options.DockerCertsPath) >= i+1 && dockerLoader.options.DockerCertsPath[i] != "" { - os.Setenv("DOCKER_CERT_PATH", dockerLoader.options.DockerCertsPath[i]) + if len(controller.options.DockerCertsPath) >= i+1 && controller.options.DockerCertsPath[i] != "" { + os.Setenv("DOCKER_CERT_PATH", controller.options.DockerCertsPath[i]) } else { os.Unsetenv("DOCKER_CERT_PATH") } - if len(dockerLoader.options.DockerAPIsVersion) >= i+1 && dockerLoader.options.DockerAPIsVersion[i] != "" { - os.Setenv("DOCKER_API_VERSION", dockerLoader.options.DockerAPIsVersion[i]) + if len(controller.options.DockerAPIsVersion) >= i+1 && controller.options.DockerAPIsVersion[i] != "" { + os.Setenv("DOCKER_API_VERSION", controller.options.DockerAPIsVersion[i]) } else { os.Unsetenv("DOCKER_API_VERSION") } - dockerClient, err := client.NewEnvClient() + dockerClient, err := client.NewClientWithOpts(client.FromEnv) if err != nil { log.Error("Docker connection failed to docker specify socket", zap.Error(err), zap.String("DockerSocket", dockerSocket)) return err @@ -104,8 +104,8 @@ func (dockerLoader *DockerLoader) Start() error { // by default it will used the env docker if len(dockerClients) == 0 { - dockerClient, err := client.NewEnvClient() - dockerLoader.options.DockerSockets = append(dockerLoader.options.DockerSockets, os.Getenv("DOCKER_HOST")) + dockerClient, err := client.NewClientWithOpts(client.FromEnv) + controller.options.DockerSockets = append(controller.options.DockerSockets, os.Getenv("DOCKER_HOST")) if err != nil { log.Error("Docker connection failed", zap.Error(err)) return err @@ -124,49 +124,71 @@ func 
(dockerLoader *DockerLoader) Start() error { dockerClients = append(dockerClients, wrappedClient) } - dockerLoader.dockerClients = dockerClients - dockerLoader.skipEvents = make([]bool, len(dockerLoader.dockerClients)) + controller.dockerClients = dockerClients + controller.skipEvents = make([]bool, len(controller.dockerClients)) - dockerLoader.generator = generator.CreateGenerator( + controller.generator = generator.CreateGenerator( dockerClients, docker.CreateUtils(), - dockerLoader.options, + controller.options, ) - log.Info( - "Start", - zap.String("CaddyfilePath", dockerLoader.options.CaddyfilePath), - zap.String("LabelPrefix", dockerLoader.options.LabelPrefix), - zap.Duration("PollingInterval", dockerLoader.options.PollingInterval), - zap.Bool("ProcessCaddyfile", dockerLoader.options.ProcessCaddyfile), - zap.Bool("ProxyServiceTasks", dockerLoader.options.ProxyServiceTasks), - zap.String("IngressNetworks", fmt.Sprintf("%v", dockerLoader.options.IngressNetworks)), - zap.Strings("DockerSockets", dockerLoader.options.DockerSockets), - zap.Strings("DockerCertsPath", dockerLoader.options.DockerCertsPath), - zap.Strings("DockerAPIsVersion", dockerLoader.options.DockerAPIsVersion), - ) + log.Info("Start", zap.Any("options", controller.options)) ready := make(chan struct{}) - dockerLoader.timer = time.AfterFunc(0, func() { + controller.timer = time.AfterFunc(0, func() { <-ready - dockerLoader.update() + controller.update() }) close(ready) - go dockerLoader.monitorEvents() + if controller.options.Mode&config.Server == 0 { + err := controller.startHttpServer() + if err != nil { + return err + } + } + + go controller.monitorEvents() } return nil } -func (dockerLoader *DockerLoader) monitorEvents() { +func (controller *CaddyController) startHttpServer() error { + http.HandleFunc("/controller-subnets", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + controllerNetworkGroup, err := 
controller.generator.GetControllerNetworkGroup(logger()) + if err != nil { + http.Error(w, err.Error(), 500) + return + } + + var controllerSubnets []string + for _, networkInfo := range controllerNetworkGroup.Networks { + for _, subnet := range networkInfo.Subnets { + controllerSubnets = append(controllerSubnets, subnet.String()) + } + } + + err = json.NewEncoder(w).Encode(controllerSubnets) + if err != nil { + http.Error(w, err.Error(), 500) + return + } + }) + return http.ListenAndServe(":80", nil) +} + +func (controller *CaddyController) monitorEvents() { for { - dockerLoader.listenEvents() + controller.listenEvents() time.Sleep(30 * time.Second) } } -func (dockerLoader *DockerLoader) listenEvents() { +func (controller *CaddyController) listenEvents() { args := filters.NewArgs() if !isTrue.MatchString(os.Getenv("CADDY_DOCKER_NO_SCOPE")) { // This env var is useful for Podman where in some instances the scope can cause some issues. @@ -177,7 +199,7 @@ func (dockerLoader *DockerLoader) listenEvents() { args.Add("type", "container") args.Add("type", "config") - for i, dockerClient := range dockerLoader.dockerClients { + for i, dockerClient := range controller.dockerClients { context, cancel := context.WithCancel(context.Background()) eventsChan, errorChan := dockerClient.Events(context, types.EventsOptions{ @@ -185,13 +207,13 @@ func (dockerLoader *DockerLoader) listenEvents() { }) log := logger() - log.Info("Connecting to docker events", zap.String("DockerSocket", dockerLoader.options.DockerSockets[i])) + log.Info("Connecting to docker events", zap.String("DockerSocket", controller.options.DockerSockets[i])) ListenEvents: for { select { case event := <-eventsChan: - if dockerLoader.skipEvents[i] { + if controller.skipEvents[i] { continue } @@ -207,8 +229,8 @@ func (dockerLoader *DockerLoader) listenEvents() { (event.Type == "config" && event.Action == "remove") if update { - dockerLoader.skipEvents[i] = true - dockerLoader.timer.Reset(100 * time.Millisecond) + 
controller.skipEvents[i] = true + controller.timer.Reset(100 * time.Millisecond) } case err := <-errorChan: cancel() @@ -221,19 +243,19 @@ func (dockerLoader *DockerLoader) listenEvents() { } } -func (dockerLoader *DockerLoader) update() bool { - dockerLoader.timer.Reset(dockerLoader.options.PollingInterval) - for i := 0; i < len(dockerLoader.skipEvents); i++ { - dockerLoader.skipEvents[i] = false +func (controller *CaddyController) update() bool { + controller.timer.Reset(controller.options.PollingInterval) + for i := 0; i < len(controller.skipEvents); i++ { + controller.skipEvents[i] = false } // Don't cache the logger more globally, it can change based on config reloads log := logger() - caddyfile, controlledServers := dockerLoader.generator.GenerateCaddyfile(log) + caddyfile, controlledServers := controller.generator.GenerateCaddyfile(log) - caddyfileChanged := !bytes.Equal(dockerLoader.lastCaddyfile, caddyfile) + caddyfileChanged := !bytes.Equal(controller.lastCaddyfile, caddyfile) - dockerLoader.lastCaddyfile = caddyfile + controller.lastCaddyfile = caddyfile if caddyfileChanged { log.Info("New Caddyfile", zap.ByteString("caddyfile", caddyfile)) @@ -257,36 +279,36 @@ func (dockerLoader *DockerLoader) update() bool { log.Info("New Config JSON", zap.ByteString("json", configJSON)) - dockerLoader.lastJSONConfig = configJSON - dockerLoader.lastVersion++ + controller.lastJSONConfig = configJSON + controller.lastVersion++ } var wg sync.WaitGroup for _, server := range controlledServers { wg.Add(1) - go dockerLoader.updateServer(&wg, server) + go controller.updateServer(&wg, server) } wg.Wait() return true } -func (dockerLoader *DockerLoader) updateServer(wg *sync.WaitGroup, server string) { +func (controller *CaddyController) updateServer(wg *sync.WaitGroup, server string) { defer wg.Done() // Skip servers that are being updated already - if dockerLoader.serversUpdating.Get(server) { + if controller.serversUpdating.Get(server) { return } // Flag and unflag updating 
- dockerLoader.serversUpdating.Set(server, true) - defer dockerLoader.serversUpdating.Delete(server) + controller.serversUpdating.Set(server, true) + defer controller.serversUpdating.Delete(server) - version := dockerLoader.lastVersion + version := controller.lastVersion // Skip servers that already have this version - if dockerLoader.serversVersions.Get(server) >= version { + if controller.serversVersions.Get(server) >= version { return } @@ -295,7 +317,7 @@ func (dockerLoader *DockerLoader) updateServer(wg *sync.WaitGroup, server string url := "http://" + server + ":2019/load" - postBody, err := addAdminListen(dockerLoader.lastJSONConfig, "tcp/"+server+":2019") + postBody, err := addAdminListen(controller.lastJSONConfig, "tcp/"+server+":2019") if err != nil { log.Error("Failed to add admin listen to", zap.String("server", server), zap.Error(err)) return @@ -325,7 +347,7 @@ func (dockerLoader *DockerLoader) updateServer(wg *sync.WaitGroup, server string return } - dockerLoader.serversVersions.Set(server, version) + controller.serversVersions.Set(server, version) log.Info("Successfully configured", zap.String("server", server)) } diff --git a/examples/distributed.yaml b/examples/distributed-controller-network.yaml similarity index 92% rename from examples/distributed.yaml rename to examples/distributed-controller-network.yaml index e1c5a639..a9442076 100644 --- a/examples/distributed.yaml +++ b/examples/distributed-controller-network.yaml @@ -12,7 +12,7 @@ services: - caddy environment: - CADDY_DOCKER_MODE=server - - CADDY_CONTROLLER_NETWORK=10.200.200.0/24 + - CADDY_CONTROLLER_URL=http://caddy_controller volumes: # this volume is needed to keep the certificates # otherwise, new ones will be re-issued upon restart @@ -29,7 +29,7 @@ services: - caddy environment: - CADDY_DOCKER_MODE=controller - - CADDY_CONTROLLER_NETWORK=10.200.200.0/24 + - CADDY_CONTROLLER_NETWORK=caddy_controller volumes: - /var/run/docker.sock:/var/run/docker.sock @@ -94,13 +94,11 @@ services: 
networks: caddy: + name: caddy driver: overlay caddy_controller: + name: caddy_controller driver: overlay - ipam: - driver: default - config: - - subnet: "10.200.200.0/24" volumes: caddy_data: {} diff --git a/tests/distributed/compose.yaml b/examples/distributed-single-network.yaml similarity index 74% rename from tests/distributed/compose.yaml rename to examples/distributed-single-network.yaml index a2789025..fb7ef678 100644 --- a/tests/distributed/compose.yaml +++ b/examples/distributed-single-network.yaml @@ -3,33 +3,31 @@ version: '3.7' services: caddy_server: - image: caddy-docker-proxy:local + image: lucaslorentz/caddy-docker-proxy:ci-alpine ports: - 80:80 - 443:443 networks: - - caddy_controller - caddy environment: - CADDY_DOCKER_MODE=server - - CADDY_CONTROLLER_NETWORK=10.200.200.0/24 + volumes: + # this volume is needed to keep the certificates + # otherwise, new ones will be re-issued upon restart + - caddy_data:/data deploy: replicas: 3 labels: caddy_controlled_server: caddy_controller: - image: caddy-docker-proxy:local + image: lucaslorentz/caddy-docker-proxy:ci-alpine networks: - - caddy_controller - caddy environment: - CADDY_DOCKER_MODE=controller - - CADDY_CONTROLLER_NETWORK=10.200.200.0/24 volumes: - - source: "${DOCKER_SOCKET_PATH}" - target: "${DOCKER_SOCKET_PATH}" - type: ${DOCKER_SOCKET_TYPE} + - /var/run/docker.sock:/var/run/docker.sock # Proxy to service whoami0: @@ -40,7 +38,9 @@ services: labels: caddy: whoami0.example.com caddy.reverse_proxy: "{{upstreams 80}}" - caddy.tls: "internal" + # remove the following line when you have verified your setup + # Otherwise you risk being rate limited by let's encrypt + caddy.tls.ca: https://acme-staging-v02.api.letsencrypt.org/directory # Proxy to service whoami1: @@ -90,11 +90,8 @@ services: networks: caddy: - name: caddy_test - external: true - caddy_controller: + name: caddy driver: overlay - ipam: - driver: default - config: - - subnet: "10.200.200.0/24" \ No newline at end of file + +volumes: + 
caddy_data: {} diff --git a/examples/standalone.yaml b/examples/standalone.yaml index fbd0824d..abe2ae95 100644 --- a/examples/standalone.yaml +++ b/examples/standalone.yaml @@ -97,6 +97,7 @@ services: networks: caddy: + name: caddy driver: overlay volumes: diff --git a/generator/containers.go b/generator/containers.go index be23e54a..e14ba44e 100644 --- a/generator/containers.go +++ b/generator/containers.go @@ -1,6 +1,8 @@ package generator import ( + "net" + "github.com/docker/docker/api/types" "github.com/lucaslorentz/caddy-docker-proxy/v2/caddyfile" "go.uber.org/zap" @@ -10,21 +12,24 @@ func (g *CaddyfileGenerator) getContainerCaddyfile(container *types.Container, l caddyLabels := g.filterLabels(container.Labels) return labelsToCaddyfile(caddyLabels, container, func() ([]string, error) { - return g.getContainerIPAddresses(container, logger, true) + return g.getContainerIPAddresses(container, logger, g.ingressNetworks) }) } -func (g *CaddyfileGenerator) getContainerIPAddresses(container *types.Container, logger *zap.Logger, ingress bool) ([]string, error) { +func (g *CaddyfileGenerator) getContainerIPAddresses(container *types.Container, logger *zap.Logger, networkGroup *NetworkGroup) ([]string, error) { ips := []string{} for _, network := range container.NetworkSettings.Networks { - if !ingress || g.ingressNetworks[network.NetworkID] { + if networkGroup == nil || + networkGroup.MatchesID(network.NetworkID) || + networkGroup.MatchesName(network.NetworkID) || + networkGroup.ContainsIP(net.ParseIP(network.IPAddress)) { ips = append(ips, network.IPAddress) } } if len(ips) == 0 { - logger.Warn("Container is not in same network as caddy", zap.String("container", container.ID), zap.String("container id", container.ID)) + logger.Warn("Container is not in network group", zap.Strings("container", container.Names), zap.String("containerId", container.ID), zap.Any("networkGroup", networkGroup)) } diff --git a/generator/containers_test.go b/generator/containers_test.go 
index 15707d44..3d9d6fd8 100644 --- a/generator/containers_test.go +++ b/generator/containers_test.go @@ -96,12 +96,12 @@ func TestContainers_DifferentNetwork(t *testing.T) { "}\n" const expectedLogs = commonLogs + - `WARN Container is not in same network as caddy {"container": "CONTAINER-ID", "container id": "CONTAINER-ID"}` + newLine + `WARN Container is not in network group {"container": [], "containerId": "CONTAINER-ID", "networkGroup": {"Name":"ingress","Networks":[{"ID":"network-id","Name":"network-name","Subnets":null}]}}` + newLine testGeneration(t, dockerClient, nil, expectedCaddyfile, expectedLogs) } -func TestContainers_ManualIngressNetworks(t *testing.T) { +func TestContainers_ManualIngressAndControllerNetworks(t *testing.T) { dockerClient := createBasicDockerClientMock() dockerClient.NetworksData = []types.NetworkResource{ { @@ -131,10 +131,11 @@ func TestContainers_ManualIngressNetworks(t *testing.T) { " reverse_proxy 10.0.0.1\n" + "}\n" - const expectedLogs = otherIngressNetworksMapLog + swarmIsAvailableLog + const expectedLogs = otherIngressNetworksMapLog + otherControllerNetworksMapLog + swarmIsAvailableLog testGeneration(t, dockerClient, func(options *config.Options) { options.IngressNetworks = []string{"other-network-name"} + options.ControllerNetwork = "other-network-name" }, expectedCaddyfile, expectedLogs) } diff --git a/generator/generator.go b/generator/generator.go index 6a5f5c23..8c99c599 100644 --- a/generator/generator.go +++ b/generator/generator.go @@ -29,7 +29,8 @@ type CaddyfileGenerator struct { labelRegex *regexp.Regexp dockerClients []docker.Client dockerUtils docker.Utils - ingressNetworks map[string]bool + ingressNetworks *NetworkGroup + controllerNetworks *NetworkGroup swarmIsAvailable []bool swarmIsAvailableTime time.Time } @@ -52,7 +53,7 @@ func (g *CaddyfileGenerator) GenerateCaddyfile(logger *zap.Logger) ([]byte, []st var caddyfileBuffer bytes.Buffer if g.ingressNetworks == nil { - ingressNetworks, err := 
g.getIngressNetworks(logger) + ingressNetworks, err := g.createNetworkGroup(logger, "ingress", g.options.IngressNetworks) if err == nil { g.ingressNetworks = ingressNetworks } else { @@ -60,6 +61,15 @@ func (g *CaddyfileGenerator) GenerateCaddyfile(logger *zap.Logger) ([]byte, []st } } + if g.controllerNetworks == nil { + controllerNetworks, err := g.createNetworkGroup(logger, "controller", []string{g.options.ControllerNetwork}) + if err == nil { + g.controllerNetworks = controllerNetworks + } else { + logger.Error("Failed to get controller networks", zap.Error(err)) + } + } + if time.Since(g.swarmIsAvailableTime) > swarmAvailabilityCacheInterval { g.checkSwarmAvailability(logger, time.Time.IsZero(g.swarmIsAvailableTime)) g.swarmIsAvailableTime = time.Now() @@ -119,17 +129,14 @@ func (g *CaddyfileGenerator) GenerateCaddyfile(logger *zap.Logger) ([]byte, []st if err == nil { for _, container := range containers { if _, isControlledServer := container.Labels[g.options.ControlledServersLabel]; isControlledServer { - ips, err := g.getContainerIPAddresses(&container, logger, false) + ips, err := g.getContainerIPAddresses(&container, logger, g.controllerNetworks) if err != nil { logger.Error("Failed to get Container IPs", zap.String("container", container.ID), zap.Error(err)) } else { - for _, ip := range ips { - if g.options.ControllerNetwork == nil || g.options.ControllerNetwork.Contains(net.ParseIP(ip)) { - controlledServers = append(controlledServers, ip) - } - } + controlledServers = append(controlledServers, ips...) 
} } + containerCaddyfile, err := g.getContainerCaddyfile(&container, logger) if err == nil { caddyfileBlock.Merge(containerCaddyfile) @@ -149,19 +156,14 @@ func (g *CaddyfileGenerator) GenerateCaddyfile(logger *zap.Logger) ([]byte, []st logger.Debug("Swarm service", zap.String("service", service.Spec.Name)) if _, isControlledServer := service.Spec.Labels[g.options.ControlledServersLabel]; isControlledServer { - ips, err := g.getServiceTasksIps(&service, logger, false) + ips, err := g.getServiceTasksIps(&service, logger, g.controllerNetworks) if err != nil { logger.Error("Failed to get Swarm service IPs", zap.String("service", service.Spec.Name), zap.Error(err)) } else { - for _, ip := range ips { - if g.options.ControllerNetwork == nil || g.options.ControllerNetwork.Contains(net.ParseIP(ip)) { - controlledServers = append(controlledServers, ip) - } - } + controlledServers = append(controlledServers, ips...) } } - // caddy. labels based config serviceCaddyfile, err := g.getServiceCaddyfile(&service, logger) if err == nil { caddyfileBlock.Merge(serviceCaddyfile) @@ -211,8 +213,12 @@ func (g *CaddyfileGenerator) GenerateCaddyfile(logger *zap.Logger) ([]byte, []st return caddyfileContent, controlledServers } -func (g *CaddyfileGenerator) checkSwarmAvailability(logger *zap.Logger, isFirstCheck bool) { +// GetControllerNetworkGroup returns the controller network group +func (g *CaddyfileGenerator) GetControllerNetworkGroup(logger *zap.Logger) (*NetworkGroup, error) { + return g.controllerNetworks, nil +} +func (g *CaddyfileGenerator) checkSwarmAvailability(logger *zap.Logger, isFirstCheck bool) { for i, dockerClient := range g.dockerClients { info, err := dockerClient.Info(context.Background()) if err == nil { @@ -228,8 +234,10 @@ func (g *CaddyfileGenerator) checkSwarmAvailability(logger *zap.Logger, isFirstC } } -func (g *CaddyfileGenerator) getIngressNetworks(logger *zap.Logger) (map[string]bool, error) { - ingressNetworks := map[string]bool{} +func (g 
*CaddyfileGenerator) createNetworkGroup(logger *zap.Logger, groupName string, input []string) (*NetworkGroup, error) { + networkGroup := NetworkGroup{ + Name: groupName, + } for _, dockerClient := range g.dockerClients { if len(g.options.IngressNetworks) > 0 { @@ -241,10 +249,30 @@ func (g *CaddyfileGenerator) getIngressNetworks(logger *zap.Logger) (map[string] if dockerNetwork.Ingress { continue } + foundNetwork := false for _, ingressNetwork := range g.options.IngressNetworks { if dockerNetwork.Name == ingressNetwork { - ingressNetworks[dockerNetwork.ID] = true - ingressNetworks[dockerNetwork.Name] = true + foundNetwork = true + networkInfo := NetworkInfo{ + ID: dockerNetwork.ID, + Name: dockerNetwork.Name, + } + for _, ipamConfig := range dockerNetwork.IPAM.Config { + if _, ipNet, err := net.ParseCIDR(ipamConfig.Subnet); err == nil && ipNet != nil { + networkInfo.Subnets = append(networkInfo.Subnets, *ipNet) + } + } + networkGroup.Networks = append(networkGroup.Networks, &networkInfo) + } + } + if !foundNetwork { + if _, ipNet, err := net.ParseCIDR(g.options.ControllerNetwork); err == nil && ipNet != nil { + networkInfo := NetworkInfo{ + Subnets: []net.IPNet{*ipNet}, + } + networkGroup.Networks = append(networkGroup.Networks, &networkInfo) + } else { + logger.Warn("Controller network not found", zap.Any("network", g.ingressNetworks)) } } } @@ -260,22 +288,30 @@ func (g *CaddyfileGenerator) getIngressNetworks(logger *zap.Logger) (map[string] } for _, network := range container.NetworkSettings.Networks { - networkInfo, err := dockerClient.NetworkInspect(context.Background(), network.NetworkID, types.NetworkInspectOptions{}) + dockerNetwork, err := dockerClient.NetworkInspect(context.Background(), network.NetworkID, types.NetworkInspectOptions{}) if err != nil { return nil, err } - if networkInfo.Ingress { + if dockerNetwork.Ingress { continue } - ingressNetworks[networkInfo.ID] = true - ingressNetworks[networkInfo.Name] = true + networkInfo := NetworkInfo{ + ID: 
dockerNetwork.ID, + Name: dockerNetwork.Name, + } + for _, ipamConfig := range dockerNetwork.IPAM.Config { + if _, ipNet, err := net.ParseCIDR(ipamConfig.Subnet); err == nil && ipNet != nil { + networkInfo.Subnets = append(networkInfo.Subnets, *ipNet) + } + } + networkGroup.Networks = append(networkGroup.Networks, &networkInfo) } } } - logger.Info("IngressNetworksMap", zap.String("ingres", fmt.Sprintf("%v", ingressNetworks))) + logger.Info("Network group created", zap.String("name", networkGroup.Name), zap.Any("networks", networkGroup.Networks)) - return ingressNetworks, nil + return &networkGroup, nil } func (g *CaddyfileGenerator) filterLabels(labels map[string]string) map[string]string { diff --git a/generator/generator_test.go b/generator/generator_test.go index 66749403..fd340654 100644 --- a/generator/generator_test.go +++ b/generator/generator_test.go @@ -24,11 +24,13 @@ var caddyNetworkName = "network-name" const newLine = "\n" const containerIdLog = `INFO Caddy ContainerID {"ID": "container-id"}` + newLine -const ingressNetworksMapLog = `INFO IngressNetworksMap {"ingres": "map[network-id:true network-name:true]"}` + newLine -const otherIngressNetworksMapLog = `INFO IngressNetworksMap {"ingres": "map[other-network-id:true other-network-name:true]"}` + newLine +const ingressNetworksGroupLog = `INFO Network group created {"name": "ingress", "networks": [{"ID":"network-id","Name":"network-name","Subnets":null}]}` + newLine +const controllerNetworksGroupLog = `INFO Network group created {"name": "controller", "networks": [{"ID":"network-id","Name":"network-name","Subnets":null}]}` + newLine +const otherIngressNetworksMapLog = `INFO Network group created {"name": "ingress", "networks": [{"ID":"other-network-id","Name":"other-network-name","Subnets":null}]}` + newLine +const otherControllerNetworksMapLog = `INFO Network group created {"name": "controller", "networks": [{"ID":"other-network-id","Name":"other-network-name","Subnets":null}]}` + newLine const 
swarmIsAvailableLog = `INFO Swarm is available {"new": true}` + newLine const swarmIsDisabledLog = `INFO Swarm is available {"new": false}` + newLine -const commonLogs = containerIdLog + ingressNetworksMapLog + swarmIsAvailableLog +const commonLogs = containerIdLog + ingressNetworksGroupLog + containerIdLog + controllerNetworksGroupLog + swarmIsAvailableLog func init() { log.SetOutput(ioutil.Discard) diff --git a/generator/networkgroup.go b/generator/networkgroup.go new file mode 100644 index 00000000..f8a00ed3 --- /dev/null +++ b/generator/networkgroup.go @@ -0,0 +1,35 @@ +package generator + +import "net" + +type NetworkGroup struct { + Name string + Networks []*NetworkInfo +} + +func (n *NetworkGroup) MatchesID(id string) bool { + for _, selector := range n.Networks { + if selector.MatchesID(id) { + return true + } + } + return false +} + +func (n *NetworkGroup) MatchesName(name string) bool { + for _, selector := range n.Networks { + if selector.MatchesName(name) { + return true + } + } + return false +} + +func (n *NetworkGroup) ContainsIP(ip net.IP) bool { + for _, selector := range n.Networks { + if selector.ContainsIP(ip) { + return true + } + } + return false +} diff --git a/generator/networkinfo.go b/generator/networkinfo.go new file mode 100644 index 00000000..c92258c1 --- /dev/null +++ b/generator/networkinfo.go @@ -0,0 +1,26 @@ +package generator + +import "net" + +type NetworkInfo struct { + ID string + Name string + Subnets []net.IPNet +} + +func (n *NetworkInfo) MatchesID(id string) bool { + return n.ID != "" && n.ID == id +} + +func (n *NetworkInfo) MatchesName(name string) bool { + return n.Name != "" && n.Name == name +} + +func (n *NetworkInfo) ContainsIP(ip net.IP) bool { + for _, subnet := range n.Subnets { + if subnet.Contains(ip) { + return true + } + } + return false +} diff --git a/generator/services.go b/generator/services.go index 116ce5cb..9a02df04 100644 --- a/generator/services.go +++ b/generator/services.go @@ -16,16 +16,16 @@ func 
(g *CaddyfileGenerator) getServiceCaddyfile(service *swarm.Service, logger caddyLabels := g.filterLabels(service.Spec.Labels) return labelsToCaddyfile(caddyLabels, service, func() ([]string, error) { - return g.getServiceProxyTargets(service, logger, true) + return g.getServiceProxyTargets(service, logger, g.ingressNetworks) }) } -func (g *CaddyfileGenerator) getServiceProxyTargets(service *swarm.Service, logger *zap.Logger, ingress bool) ([]string, error) { +func (g *CaddyfileGenerator) getServiceProxyTargets(service *swarm.Service, logger *zap.Logger, networkGroup *NetworkGroup) ([]string, error) { if g.options.ProxyServiceTasks { - return g.getServiceTasksIps(service, logger, ingress) + return g.getServiceTasksIps(service, logger, networkGroup) } - _, err := g.getServiceVirtualIps(service, logger, ingress) + _, err := g.getServiceVirtualIps(service, logger, networkGroup) if err != nil { return nil, err } @@ -33,23 +33,26 @@ func (g *CaddyfileGenerator) getServiceProxyTargets(service *swarm.Service, logg return []string{service.Spec.Name}, nil } -func (g *CaddyfileGenerator) getServiceVirtualIps(service *swarm.Service, logger *zap.Logger, ingress bool) ([]string, error) { +func (g *CaddyfileGenerator) getServiceVirtualIps(service *swarm.Service, logger *zap.Logger, networkGroup *NetworkGroup) ([]string, error) { virtualIps := []string{} for _, virtualIP := range service.Endpoint.VirtualIPs { - if !ingress || g.ingressNetworks[virtualIP.NetworkID] { + if networkGroup == nil || + networkGroup.MatchesID(virtualIP.NetworkID) || + networkGroup.MatchesName(virtualIP.NetworkID) || + networkGroup.ContainsIP(net.ParseIP(virtualIP.Addr)) { virtualIps = append(virtualIps, virtualIP.Addr) } } if len(virtualIps) == 0 { - logger.Warn("Service is not in same network as caddy", zap.String("service", service.Spec.Name), zap.String("serviceId", service.ID)) + logger.Warn("Service is not in network group", zap.String("service", service.Spec.Name), zap.String("serviceId", 
service.ID), zap.Any("networkGroup", networkGroup)) } return virtualIps, nil } -func (g *CaddyfileGenerator) getServiceTasksIps(service *swarm.Service, logger *zap.Logger, ingress bool) ([]string, error) { +func (g *CaddyfileGenerator) getServiceTasksIps(service *swarm.Service, logger *zap.Logger, networkGroup *NetworkGroup) ([]string, error) { taskListFilter := filters.NewArgs() taskListFilter.Add("service", service.ID) taskListFilter.Add("desired-state", "running") @@ -67,10 +70,12 @@ func (g *CaddyfileGenerator) getServiceTasksIps(service *swarm.Service, logger * if task.Status.State == swarm.TaskStateRunning { hasRunningTasks = true for _, networkAttachment := range task.NetworksAttachments { - if !ingress || g.ingressNetworks[networkAttachment.Network.ID] { - for _, address := range networkAttachment.Addresses { - ipAddress, _, _ := net.ParseCIDR(address) - tasksIps = append(tasksIps, ipAddress.String()) + for _, address := range networkAttachment.Addresses { + ip, _, _ := net.ParseCIDR(address) + if networkGroup == nil || + networkGroup.MatchesID(networkAttachment.Network.ID) || + networkGroup.ContainsIP(ip) { + tasksIps = append(tasksIps, ip.String()) } } } @@ -82,7 +87,7 @@ func (g *CaddyfileGenerator) getServiceTasksIps(service *swarm.Service, logger * logger.Warn("Service has no tasks in running state", zap.String("service", service.Spec.Name), zap.String("serviceId", service.ID)) } else if len(tasksIps) == 0 { - logger.Warn("Service is not in same network as caddy", zap.String("service", service.Spec.Name), zap.String("serviceId", service.ID)) + logger.Warn("Service is not in network group", zap.String("service", service.Spec.Name), zap.String("serviceId", service.ID), zap.Any("networkGroup", networkGroup)) } return tasksIps, nil diff --git a/generator/services_test.go b/generator/services_test.go index 458a004f..f51569c5 100644 --- a/generator/services_test.go +++ b/generator/services_test.go @@ -92,12 +92,12 @@ func TestServices_DifferentNetwork(t 
*testing.T) { "}\n" const expectedLogs = commonLogs + - `WARN Service is not in same network as caddy {"service": "service", "serviceId": "SERVICE-ID"}` + newLine + `WARN Service is not in network group {"service": "service", "serviceId": "SERVICE-ID", "networkGroup": {"Name":"ingress","Networks":[{"ID":"network-id","Name":"network-name","Subnets":null}]}}` + newLine testGeneration(t, dockerClient, nil, expectedCaddyfile, expectedLogs) } -func TestServices_ManualIngressNetwork(t *testing.T) { +func TestServices_ManualIngressAndControllerNetwork(t *testing.T) { dockerClient := createBasicDockerClientMock() dockerClient.NetworksData = []types.NetworkResource{ { @@ -131,10 +131,11 @@ func TestServices_ManualIngressNetwork(t *testing.T) { " reverse_proxy service\n" + "}\n" - const expectedLogs = otherIngressNetworksMapLog + swarmIsAvailableLog + const expectedLogs = otherIngressNetworksMapLog + otherControllerNetworksMapLog + swarmIsAvailableLog testGeneration(t, dockerClient, func(options *config.Options) { options.IngressNetworks = []string{"other-network-name"} + options.ControllerNetwork = "other-network-name" }, expectedCaddyfile, expectedLogs) } @@ -169,7 +170,7 @@ func TestServices_SwarmDisabled(t *testing.T) { const expectedCaddyfile = "# Empty caddyfile" - const expectedLogs = containerIdLog + ingressNetworksMapLog + swarmIsDisabledLog + const expectedLogs = containerIdLog + ingressNetworksGroupLog + containerIdLog + controllerNetworksGroupLog + swarmIsDisabledLog testGeneration(t, dockerClient, nil, expectedCaddyfile, expectedLogs) } @@ -318,14 +319,14 @@ func TestServiceTasks_DifferentNetwork(t *testing.T) { "}\n" const expectedLogs = commonLogs + - `WARN Service is not in same network as caddy {"service": "service", "serviceId": "SERVICEID"}` + newLine + `WARN Service is not in network group {"service": "service", "serviceId": "SERVICEID", "networkGroup": {"Name":"ingress","Networks":[{"ID":"network-id","Name":"network-name","Subnets":null}]}}` + newLine 
testGeneration(t, dockerClient, func(options *config.Options) { options.ProxyServiceTasks = true }, expectedCaddyfile, expectedLogs) } -func TestServiceTasks_ManualIngressNetwork(t *testing.T) { +func TestServiceTasks_ManualIngressAndControllerNetwork(t *testing.T) { dockerClient := createBasicDockerClientMock() dockerClient.ServicesData = []swarm.Service{ { @@ -374,11 +375,12 @@ func TestServiceTasks_ManualIngressNetwork(t *testing.T) { " reverse_proxy 10.0.0.1:5000\n" + "}\n" - const expectedLogs = otherIngressNetworksMapLog + swarmIsAvailableLog + const expectedLogs = otherIngressNetworksMapLog + otherControllerNetworksMapLog + swarmIsAvailableLog testGeneration(t, dockerClient, func(options *config.Options) { options.ProxyServiceTasks = true options.IngressNetworks = []string{"other-network-name"} + options.ControllerNetwork = "other-network-name" }, expectedCaddyfile, expectedLogs) } diff --git a/tests/caddyfile+config/config/caddy/Caddyfile.autosave b/tests/caddyfile+config/config/caddy/Caddyfile.autosave new file mode 100644 index 00000000..3e6cda60 --- /dev/null +++ b/tests/caddyfile+config/config/caddy/Caddyfile.autosave @@ -0,0 +1,22 @@ +{ + email test@example.com +} +(caddyfileSnippet) { + respond /caddyfile caddyfile 200 +} +(configSnippet) { + respond /config config 200 +} +caddyfile.local { + respond / caddyfile 200 + tls internal +} +config.local { + respond / config 200 + tls internal +} +service.local { + import caddyfileSnippet + import configSnippet + tls internal +} diff --git a/tests/caddyfile+config/config/caddy/autosave.json b/tests/caddyfile+config/config/caddy/autosave.json new file mode 100644 index 00000000..a625bcab --- /dev/null +++ b/tests/caddyfile+config/config/caddy/autosave.json @@ -0,0 +1 @@ 
+{"admin":{"listen":"tcp/localhost:2019"},"apps":{"http":{"servers":{"srv0":{"listen":[":443"],"routes":[{"handle":[{"handler":"subroute","routes":[{"handle":[{"body":"caddyfile","handler":"static_response","status_code":200}],"match":[{"path":["/"]}]}]}],"match":[{"host":["caddyfile.local"]}],"terminal":true},{"handle":[{"handler":"subroute","routes":[{"handle":[{"body":"caddyfile","handler":"static_response","status_code":200}],"match":[{"path":["/caddyfile"]}]},{"handle":[{"body":"config","handler":"static_response","status_code":200}],"match":[{"path":["/config"]}]}]}],"match":[{"host":["service.local"]}],"terminal":true},{"handle":[{"handler":"subroute","routes":[{"handle":[{"body":"config","handler":"static_response","status_code":200}],"match":[{"path":["/"]}]}]}],"match":[{"host":["config.local"]}],"terminal":true}]}}},"tls":{"automation":{"policies":[{"issuers":[{"module":"internal"}],"subjects":["caddyfile.local","service.local","config.local"]}]}}}} \ No newline at end of file diff --git a/tests/distributed-controller-network-legacy/compose.yaml b/tests/distributed-controller-network-legacy/compose.yaml new file mode 100644 index 00000000..cb8e4bdb --- /dev/null +++ b/tests/distributed-controller-network-legacy/compose.yaml @@ -0,0 +1,62 @@ +version: '3.7' + +services: + + caddy_server: + image: caddy-docker-proxy:local + ports: + - 80:80 + - 443:443 + networks: + - caddy_controller + - caddy + environment: + - CADDY_DOCKER_MODE=server + - CADDY_CONTROLLER_NETWORK=10.200.200.0/24 + deploy: + replicas: 3 + labels: + caddy_controlled_server: + + caddy_controller: + image: caddy-docker-proxy:local + networks: + - caddy_controller + - caddy + environment: + - CADDY_DOCKER_MODE=controller + - CADDY_CONTROLLER_NETWORK=10.200.200.0/24 + volumes: + - source: "${DOCKER_SOCKET_PATH}" + target: "${DOCKER_SOCKET_PATH}" + type: ${DOCKER_SOCKET_TYPE} + + whoami_service: + image: containous/whoami + networks: + - caddy + deploy: + labels: + caddy: 
whoami_service.example.com + caddy.reverse_proxy: "{{upstreams 80}}" + caddy.tls: "internal" + + whoami_container: + image: containous/whoami + networks: + - caddy + labels: + caddy: whoami_container.example.com + caddy.reverse_proxy: "{{upstreams 80}}" + caddy.tls: "internal" + +networks: + caddy: + name: caddy_test + external: true + caddy_controller: + driver: overlay + ipam: + driver: default + config: + - subnet: "10.200.200.0/24" \ No newline at end of file diff --git a/tests/distributed-controller-network-legacy/run.sh b/tests/distributed-controller-network-legacy/run.sh new file mode 100755 index 00000000..2856957f --- /dev/null +++ b/tests/distributed-controller-network-legacy/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +. ../functions.sh + +docker stack deploy -c compose.yaml --prune caddy_test + +retry curl --show-error -s -k -f --resolve whoami_service.example.com:443:127.0.0.1 https://whoami_service.example.com && +retry curl --show-error -s -k -f --resolve whoami_container.example.com:443:127.0.0.1 https://whoami_container.example.com || { + docker service logs caddy_test_caddy_controller + docker service logs caddy_test_caddy_server + exit 1 +} diff --git a/tests/distributed-controller-network/compose.yaml b/tests/distributed-controller-network/compose.yaml new file mode 100644 index 00000000..488e182b --- /dev/null +++ b/tests/distributed-controller-network/compose.yaml @@ -0,0 +1,59 @@ +version: '3.7' + +services: + + caddy_server: + image: caddy-docker-proxy:local + ports: + - 80:80 + - 443:443 + networks: + - caddy_controller + - caddy + environment: + - CADDY_DOCKER_MODE=server + - CADDY_CONTROLLER_URL=http://caddy_controller + deploy: + replicas: 3 + labels: + caddy_controlled_server: + + caddy_controller: + image: caddy-docker-proxy:local + networks: + - caddy_controller + - caddy + environment: + - CADDY_DOCKER_MODE=controller + - CADDY_CONTROLLER_NETWORK=caddy_controller + volumes: + - source: "${DOCKER_SOCKET_PATH}" + target: 
"${DOCKER_SOCKET_PATH}" + type: ${DOCKER_SOCKET_TYPE} + + whoami_service: + image: containous/whoami + networks: + - caddy + deploy: + labels: + caddy: whoami_service.example.com + caddy.reverse_proxy: "{{upstreams 80}}" + caddy.tls: "internal" + + whoami_container: + image: containous/whoami + networks: + - caddy + labels: + caddy: whoami_container.example.com + caddy.reverse_proxy: "{{upstreams 80}}" + caddy.tls: "internal" + +networks: + caddy: + name: caddy_test + external: true + caddy_controller: + name: caddy_controller + driver: overlay \ No newline at end of file diff --git a/tests/distributed-controller-network/run.sh b/tests/distributed-controller-network/run.sh new file mode 100755 index 00000000..2856957f --- /dev/null +++ b/tests/distributed-controller-network/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +. ../functions.sh + +docker stack deploy -c compose.yaml --prune caddy_test + +retry curl --show-error -s -k -f --resolve whoami_service.example.com:443:127.0.0.1 https://whoami_service.example.com && +retry curl --show-error -s -k -f --resolve whoami_container.example.com:443:127.0.0.1 https://whoami_container.example.com || { + docker service logs caddy_test_caddy_controller + docker service logs caddy_test_caddy_server + exit 1 +} diff --git a/tests/distributed-single-network/compose.yaml b/tests/distributed-single-network/compose.yaml new file mode 100644 index 00000000..49023040 --- /dev/null +++ b/tests/distributed-single-network/compose.yaml @@ -0,0 +1,52 @@ +version: '3.7' + +services: + + caddy_server: + image: caddy-docker-proxy:local + ports: + - 80:80 + - 443:443 + networks: + - caddy + environment: + - CADDY_DOCKER_MODE=server + deploy: + replicas: 3 + labels: + caddy_controlled_server: + + caddy_controller: + image: caddy-docker-proxy:local + networks: + - caddy + environment: + - CADDY_DOCKER_MODE=controller + volumes: + - source: "${DOCKER_SOCKET_PATH}" + target: "${DOCKER_SOCKET_PATH}" + type: ${DOCKER_SOCKET_TYPE} + + 
whoami_service: + image: containous/whoami + networks: + - caddy + deploy: + labels: + caddy: whoami_service.example.com + caddy.reverse_proxy: "{{upstreams 80}}" + caddy.tls: "internal" + + whoami_container: + image: containous/whoami + networks: + - caddy + labels: + caddy: whoami_container.example.com + caddy.reverse_proxy: "{{upstreams 80}}" + caddy.tls: "internal" + +networks: + caddy: + name: caddy_test + external: true diff --git a/tests/distributed-single-network/run.sh b/tests/distributed-single-network/run.sh new file mode 100755 index 00000000..2856957f --- /dev/null +++ b/tests/distributed-single-network/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +. ../functions.sh + +docker stack deploy -c compose.yaml --prune caddy_test + +retry curl --show-error -s -k -f --resolve whoami_service.example.com:443:127.0.0.1 https://whoami_service.example.com && +retry curl --show-error -s -k -f --resolve whoami_container.example.com:443:127.0.0.1 https://whoami_container.example.com || { + docker service logs caddy_test_caddy_controller + docker service logs caddy_test_caddy_server + exit 1 +} diff --git a/tests/distributed/run.sh b/tests/distributed/run.sh deleted file mode 100755 index a8fa02cb..00000000 --- a/tests/distributed/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -e - -. 
../functions.sh - -docker stack deploy -c compose.yaml --prune caddy_test - -retry curl --show-error -s -k -f --resolve whoami0.example.com:443:127.0.0.1 https://whoami0.example.com && -retry curl --show-error -s -k -f --resolve whoami1.example.com:443:127.0.0.1 https://whoami1.example.com && -retry curl --show-error -s -k -f --resolve whoami2.example.com:443:127.0.0.1 https://whoami2.example.com && -retry curl --show-error -s -k -f --resolve whoami3.example.com:443:127.0.0.1 https://whoami3.example.com && -retry curl --show-error -s -k -f --resolve echo0.example.com:443:127.0.0.1 https://echo0.example.com/sourcepath/something || { - docker service logs caddy_test_caddy_controller - docker service logs caddy_test_caddy_server - exit 1 -}