diff --git a/.gitignore b/.gitignore index 28472d9..8e7f5bb 100644 --- a/.gitignore +++ b/.gitignore @@ -49,4 +49,7 @@ dependency-reduced-pom.xml buildNumber.properties .mvn/timing.properties - +# Eclipse +.classpath +.settings +.project diff --git a/Dockerfile.example b/Dockerfile.example index d042689..1538421 100644 --- a/Dockerfile.example +++ b/Dockerfile.example @@ -9,11 +9,11 @@ RUN yum install -y openssl RUN mkdir -p /srv/app/config COPY target/grand-central-1.0-SNAPSHOT.jar /srv/app/ -COPY config/configuration.yaml /srv/app/config/ -COPY config/pod.yaml /srv/app/config/ +COPY config/configuration.yml /srv/app/config/ +COPY config/pod.yml /srv/app/config/ RUN echo -n | openssl s_client -connect :443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > /srv/app/config/k8s.pem RUN keytool -importkeystore -srckeystore /usr/java/latest/lib/security/cacerts -destkeystore /srv/app/config/grandcentral.jks -srcstorepass changeit -deststorepass changeit RUN echo "yes" | keytool -import -v -trustcacerts -alias local_k8s -file /srv/app/config/k8s.pem -keystore /srv/app/config/grandcentral.jks -keypass changeit -storepass changeit -CMD cd /srv/app && /usr/bin/java -jar /srv/app/grand-central-1.0-SNAPSHOT.jar server /srv/app/config/configuration.yaml +CMD cd /srv/app && /usr/bin/java -jar /srv/app/grand-central-1.0-SNAPSHOT.jar server /srv/app/config/configuration.yml diff --git a/README.md b/README.md index 01da35a..7634fd6 100644 --- a/README.md +++ b/README.md @@ -60,13 +60,26 @@ sudo route add -net 10.2.47 172.17.4.99 ## Local K8S Certificate The K8S cluster has a self-signed SSL certificate. It must be added to a keystore as a trusted certificate before requests are permitted. 
+To retrieve K8S master ip, assuming using GCP, you need to first get your username/password via: + +``` +gcloud container clusters describe hello-zeppelin --zone us-central1-f +``` + +Then you can look up the IP of the K8S dashboard via + +``` +kubectl cluster-info | grep kubernetes-dashboard +``` + + **OS X** ``` brew install openssl -echo -n | /usr/local/Cellar/openssl/1.0.2e/bin/openssl s_client -connect :443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > config/local.pem +echo -n | /usr/local/Cellar/openssl/1.0.2g/bin/openssl s_client -connect :443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > config/local.pem keytool -importkeystore -srckeystore $JAVA_HOME/jre/lib/security/cacerts -destkeystore config/grandcentral.jks -srcstorepass changeit -deststorepass changeit -ho "yes" | keytool -import -v -trustcacerts -alias local_k8s -file k8s/local.pem -keystore config/grandcentral.jks -keypass changeit -storepass changeit +echo "yes" | keytool -import -v -trustcacerts -alias local_k8s -file config/local.pem -keystore config/grandcentral.jks -keypass changeit -storepass changeit ``` **Linux** @@ -74,7 +87,7 @@ ho "yes" | keytool -import -v -trustcacerts -alias local_k8s -file k8s/local.pem ``` echo -n | openssl s_client -connect 172.17.4.99:443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > config/local.pem keytool -importkeystore -srckeystore $JAVA_HOME/jre/lib/security/cacerts -destkeystore config/grandcentral.jks -srcstorepass changeit -deststorepass changeit -echo "yes" | keytool -import -v -trustcacerts -alias local_k8s -file k8s/local.pem -keystore config/grandcentral.jks -keypass changeit -storepass changeit +echo "yes" | keytool -import -v -trustcacerts -alias local_k8s -file config/local.pem -keystore config/grandcentral.jks -keypass changeit -storepass changeit ``` ### Logging in to GCR.io diff --git a/README_dockercloud.md b/README_dockercloud.md new file mode 100644 index 0000000..b23b905 --- /dev/null +++ 
b/README_dockercloud.md @@ -0,0 +1,88 @@ +# Grand Central +*Quepid's automated review deployment tool. Gut-check in the cloud* + +Grand Central is a tool for automated deployment and cleanup of developer review environments / containers. Requests are parsed and routed based on their URL structure. If the target container exists the request is proxied along. If not Grand Central will spin up a container and forward the request once it comes online. + +## URL Structure +The appropriate container is determined by parsing the first part of the domain name. `*.review.quepid.com` in DNS is directed at the Grand Central service. The application parses the domain name to retrieve the appropriate Git version to deploy. `http://db139cf.review.quepid.com/secure` would route to a container running version `db139cf` if it exists. + +## Request Flow +When a request is received by the system the following processing takes place. + +1. Validate the git version supplied in the `Host` header. *Does it match a valid short hash signature?* +1. Verify if the version is currently running + * If so, proxy the request + * If not, continue +1. Verify version exists in Container Registry. *We can't deploy a version which doesn't exist.* + * If so, continue + * If not, 404 +1. Create DockerCloud Stack (?) containing app version, database, and loader +1. Create Service for Stack (routes requests internally within the cluster) +1. Proxy the original request + +*Note* that all requests will have some metrics stored to determine activity for a given stack. This is useful when reaping old stacks. + +**BUT WAIT!** *What happens when two requests come in for the same version?* + +Easy, state is maintained within an Atomically accessed Map. As soon as a version is detected to not be running we instantiate it before releasing the lock. If a version exists, but isn't running we pause the request until the creation process is complete on another thread. 
+ +## Stack Creation +Should a container not be running in the DockerCloud cluster when the request is performed the following process starts. + +1. Create a Stack with the following components (ERIC: IS THIS AT ALL RIGHT?) + * Rails Application + * MySQL Instance + * Data dump loader (golden review image stored block store) +1. Create a service referencing that stack + +## Stack Clean-up +Every hour a janitorial task runs which cleans up stacks / services that have not received a request in the past *x* seconds. This keeps cluster resources available. Since stacks are trivial to create they can be re-instantiated easily. + +## Resource Constraints +There is a hard limit to the number of simultaneous stacks running on the cluster. To prevent Denial of Service by our own team the maximum number of review environments is capped at *y*. Should a new stack be requested when the currently running count is already maxed the stack with the oldest most recent request will be removed. + +## Future Features +* Persistent hashes - the ability to mark a version as persistent. This prevents the janitor from reaping the pod. +* Admin interface - allow a RESTful interface to manage stacks. List all stacks, create a new one, delete an old one out of the janitorial process etc. +* Security?! + + +## Curling + +curl --user username:apikey "https://cloud.docker.com/api/app/v1/stack/" + +curl --user username:apikey "https://cloud.docker.com/api/app/v1/stack/" + +## Development/Testing +There are a lot of moving pieces to GrandCentral, you need GC itself, plus the configuration to work with either Kubernetes or DockerCloud. This is how I set up my local environment: + +1. Set up your /etc/hosts with a couple of fake DNS entries that map to two released versions of Apache. You can see these tags at https://hub.docker.com/r/eboraas/apache/tags/. + +``` +127.0.0.1 latest.apache.grandcentral.com +127.0.0.1 stretch.apache.grandcentral.com +``` + +1. 
I like to run the core GrandCentral application in Eclipse in debug mode. Fortunately that is very easy. Just setup a _Java Application_ run/debug configuration. In the _Arguments_ tab tell Dropwizard to run in _server_ mode and pass in the configuration file: `server src/test/resources/local-dockercloud.yml`. Then in the _Environment_ tab, add an entry for `DOCKERCLOUD_APIKEY` and `DOCKERCLOUD_USERNAME`. + +1. Fire up the application, and you'll see some startup checks that verify access to DockerCloud. + +1. Browse to http://latest.apache.grandcentral.com:8080 and in about 10 seconds you should see a default Debian Apache install page load up! Check your DockerCloud dashboard, you'll see the service fired up and running on an internal port. Then, pull up http://stretch.apache.grandcentral.com:8080 and you'll see the new pod started, and the old pod deleted due to the _maximum_stack_count=1_. + + +## /etc/hosts + +To make testing your local set up easier, add this to your `/etc/hosts` file: + +``` +127.0.0.1 v1.datastart.grandcentral.com +127.0.0.1 v2.datastart.grandcentral.com +``` + + +## Dockerizing GrandCentral + + +## Implementation + +All logic for checking / creation of pods may be performed in a [`javax.servlet.Filter`](http://docs.oracle.com/javaee/7/api/javax/servlet/Filter.html?is-external=true). Requests may then be passed along to a [`org.eclipse.jetty.proxy.ProxyServlet`](http://download.eclipse.org/jetty/stable-9/apidocs/org/eclipse/jetty/proxy/ProxyServlet.html) after stack management is complete. 
diff --git a/config/configuration.yml.example b/config/configuration.yml.example index 8af0532..d3b3f79 100644 --- a/config/configuration.yml.example +++ b/config/configuration.yml.example @@ -21,4 +21,10 @@ gcloud: project: quepid-1051 container_name: quails - +dockercloud: + protocol: https + hostname: cloud.docker.com + namespace: datastart + stack_json_path: src/test/resources/docker-cloud.json + username: ${DOCKERCLOUD_USERNAME} + apikey: ${DOCKERCLOUD_APIKEY} diff --git a/config/docker-cloud.json.example b/config/docker-cloud.json.example new file mode 100644 index 0000000..2b38f18 --- /dev/null +++ b/config/docker-cloud.json.example @@ -0,0 +1,11 @@ +{ + "name": "echostack", + "services": [ + { + "name":"echoheaders", + "please_note": "Grand Central replaces __DOCKER_TAG__ in the image with the version", + "image": "gcr.io/google_containers/echoserver:__DOCKER_TAG__", + "ports": ["8080:8080"] + } + ] +} diff --git a/config/pod.yaml.example b/config/pod.yml.example similarity index 100% rename from config/pod.yaml.example rename to config/pod.yml.example diff --git a/pom.xml b/pom.xml index dd6a3dc..c23f371 100644 --- a/pom.xml +++ b/pom.xml @@ -7,11 +7,12 @@ Grand Central com.o19s grand-central - 1.0-SNAPSHOT + 1.1-SNAPSHOT - 0.9.1 - 9.2.13.v20150730 + 1.0.2 + 9.3.9.v20160517 + 4.12 @@ -30,6 +31,53 @@ jetty-proxy ${jetty.version} + + junit + junit + ${junit.version} + test + + + com.github.tomakehurst + wiremock + 2.1.11 + test + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-databind + + + org.eclipse.jetty + jetty-server + + + org.eclipse.jetty + jetty-security + + + org.eclipse.jetty + jetty-webapp + + + org.eclipse.jetty + jetty-servlet + + + org.eclipse.jetty + jetty-servlets + + + + @@ -70,7 +118,7 @@ - com.o19s.grandcentral.GrandCentralApplication + com.o19s.grandcentral.GrandCentralApplication2 @@ -79,4 +127,4 @@ - \ No newline at end of file + diff --git 
a/src/main/java/com/o19s/grandcentral/GrandCentralApplication.java b/src/main/java/com/o19s/grandcentral/GrandCentralApplication.java index 1c0339d..e6fe306 100644 --- a/src/main/java/com/o19s/grandcentral/GrandCentralApplication.java +++ b/src/main/java/com/o19s/grandcentral/GrandCentralApplication.java @@ -1,17 +1,19 @@ package com.o19s.grandcentral; +import io.dropwizard.Application; +import io.dropwizard.setup.Bootstrap; +import io.dropwizard.setup.Environment; + +import java.util.EnumSet; + +import javax.servlet.DispatcherType; + import com.o19s.grandcentral.gcloud.GCloudRegistry; import com.o19s.grandcentral.healthchecks.ContainerRegistryHealthCheck; import com.o19s.grandcentral.healthchecks.KubernetesMasterHealthCheck; import com.o19s.grandcentral.kubernetes.PodManager; import com.o19s.grandcentral.servlets.PodProxyServlet; import com.o19s.grandcentral.servlets.PodServletFilter; -import io.dropwizard.Application; -import io.dropwizard.setup.Bootstrap; -import io.dropwizard.setup.Environment; - -import javax.servlet.DispatcherType; -import java.util.EnumSet; public class GrandCentralApplication extends Application { public static void main(String[] args) throws Exception { @@ -44,7 +46,7 @@ public void run(GrandCentralConfiguration config, Environment environment) throw config.getKubernetesConfiguration().getNamespace())); // Build the PodManager - PodManager podManager = new PodManager( + LinkedContainerManager podManager = new PodManager( config.getKubernetesConfiguration(), config.getKeystorePath(), config.getRefreshIntervalInMs(), @@ -52,7 +54,7 @@ public void run(GrandCentralConfiguration config, Environment environment) throw config.getPodYamlPath() ); - GCloudRegistry gCloudRegistry = new GCloudRegistry(config.getGCloudConfiguration(), config.getKeystorePath()); + ImageRegistry gCloudRegistry = new GCloudRegistry(config.getGCloudConfiguration(), config.getKeystorePath()); // Define the filter and proxy final PodServletFilter psv = new 
PodServletFilter(config.getGrandcentralDomain(), podManager, gCloudRegistry); diff --git a/src/main/java/com/o19s/grandcentral/GrandCentralApplication2.java b/src/main/java/com/o19s/grandcentral/GrandCentralApplication2.java new file mode 100644 index 0000000..6a77441 --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/GrandCentralApplication2.java @@ -0,0 +1,87 @@ +package com.o19s.grandcentral; + +import io.dropwizard.Application; +import io.dropwizard.configuration.EnvironmentVariableSubstitutor; +import io.dropwizard.configuration.SubstitutingSourceProvider; +import io.dropwizard.setup.Bootstrap; +import io.dropwizard.setup.Environment; + +import java.util.EnumSet; + +import javax.servlet.DispatcherType; + +import com.o19s.grandcentral.dockercloud.DockercloudRegistry; +import com.o19s.grandcentral.dockercloud.StackManager; +import com.o19s.grandcentral.servlets.PodProxyServlet; +import com.o19s.grandcentral.servlets.PodServletFilter; +//import com.o19s.grandcentral.healthchecks.ContainerRegistryHealthCheck; +//import com.o19s.grandcentral.healthchecks.KubernetesMasterHealthCheck; + +public class GrandCentralApplication2 extends Application { + public static void main(String[] args) throws Exception { + new GrandCentralApplication2().run(args); + } + + @Override + public String getName() { + return "Grand Central"; + } + + @Override + public void initialize(Bootstrap bootstrap) { + // Enable variable substitution with environment variables + bootstrap.setConfigurationSourceProvider( + new SubstitutingSourceProvider(bootstrap.getConfigurationSourceProvider(), + new EnvironmentVariableSubstitutor(false) + ) + ); + + } + + @Override + public void run(GrandCentralConfiguration2 config, Environment environment) throws Exception { + + // FIXME: Should be a healthcheck that confirms access to DockerCloud. 
+ System.out.println("Username:" + config.getDockercloudConfiguration().getUsername()); + // Add health checks + /* + environment.healthChecks().register("container_registry", new ContainerRegistryHealthCheck( + config.getKeystorePath(), + config.getGCloudConfiguration().getRegistryDomain(), + config.getGCloudConfiguration().getProject(), + config.getGCloudConfiguration().getContainerName(), + config.getGCloudConfiguration().getRegistryUsername(), + config.getGCloudConfiguration().getRegistryPassword())); + environment.healthChecks().register("kubernetes_master", new KubernetesMasterHealthCheck( + config.getKubernetesConfiguration().getMasterIp(), + config.getKeystorePath(), + config.getKubernetesConfiguration().getUsername(), + config.getKubernetesConfiguration().getPassword(), + config.getKubernetesConfiguration().getNamespace())); + */ + + // Build the StackManager + LinkedContainerManager linkedContainerManager = new StackManager( + config.getDockercloudConfiguration(), + config.getRefreshIntervalInMs(), + config.getMaximumStackCount() + ); + + + ImageRegistry imageRegistry = new DockercloudRegistry(config.getDockercloudConfiguration()); + + + // Define the filter and proxy + final PodServletFilter psv = new PodServletFilter(config.getGrandcentralDomain(), linkedContainerManager, imageRegistry); + final PodProxyServlet pps = new PodProxyServlet(config.getPodPort()); + + // Disable Jersey in the proxy environment + environment.jersey().disable(); + + // Setup Servlet filters and proxies + environment.servlets().addFilter("Pod Servlet Filter", psv) + .addMappingForUrlPatterns(EnumSet.of(DispatcherType.REQUEST), true, "/*"); + environment.servlets().addServlet("Pod Proxy Servlet", pps) + .addMapping("/*"); + } +} diff --git a/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration.java b/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration.java index a519555..892c6ce 100644 --- a/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration.java +++ 
b/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration.java @@ -1,15 +1,17 @@ package com.o19s.grandcentral; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.o19s.grandcentral.gcloud.GCloudConfiguration; -import com.o19s.grandcentral.kubernetes.KubernetesConfiguration; import io.dropwizard.Configuration; import io.dropwizard.jackson.JsonSnakeCase; -import org.hibernate.validator.constraints.NotEmpty; import javax.validation.Valid; import javax.validation.constraints.NotNull; +import org.hibernate.validator.constraints.NotEmpty; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.o19s.grandcentral.gcloud.GCloudConfiguration; +import com.o19s.grandcentral.kubernetes.KubernetesConfiguration; + /** * Configuration values for the {@link GrandCentralApplication}. Data is loaded from a provided YAML file on start or * passed in via Environment variables. diff --git a/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration2.java b/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration2.java new file mode 100644 index 0000000..895ce47 --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/GrandCentralConfiguration2.java @@ -0,0 +1,112 @@ +package com.o19s.grandcentral; + +import io.dropwizard.Configuration; +import io.dropwizard.jackson.JsonSnakeCase; + +import javax.validation.Valid; +import javax.validation.constraints.NotNull; + +import org.hibernate.validator.constraints.NotEmpty; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.o19s.grandcentral.dockercloud.DockercloudConfiguration; + +/** + * Configuration values for the {@link GrandCentralApplication}. Data is loaded from a provided YAML file on start or + * passed in via Environment variables. 
+ */ +@JsonSnakeCase +public class GrandCentralConfiguration2 extends Configuration { + @Valid + @NotNull + private long janitorCleanupThreshold; + + @Valid + @NotNull + private int maximumStackCount; + + @Valid + @NotEmpty + @NotNull + private String grandcentralDomain; + + @Valid + @NotNull + private long refreshIntervalInMs; + + @Valid + @NotNull + private int podPort; + + + + + @Valid + @NotNull + private DockercloudConfiguration dockercloud = new DockercloudConfiguration(); + + + @JsonProperty + public long getJanitorCleanupThreshold() { + return janitorCleanupThreshold; + } + + @JsonProperty + public void setJanitorCleanupThreshold(long janitorCleanupThreshold) { + this.janitorCleanupThreshold = janitorCleanupThreshold; + } + + + @JsonProperty + public int getMaximumStackCount() { + return maximumStackCount; + } + + @JsonProperty + public void setMaximumStackCount(int maximumStackCount) { + this.maximumStackCount = maximumStackCount; + } + + @JsonProperty + public String getGrandcentralDomain() { + return grandcentralDomain; + } + + @JsonProperty + public void setGrandcentralDomain(String grandcentralDomain) { + this.grandcentralDomain = grandcentralDomain; + } + + @JsonProperty + public long getRefreshIntervalInMs() { + return refreshIntervalInMs; + } + + + @JsonProperty + public void setRefreshIntervalInMs(long refreshIntervalInMs) { + this.refreshIntervalInMs = refreshIntervalInMs; + } + + + @JsonProperty("dockercloud") + public DockercloudConfiguration getDockercloudConfiguration() { + return dockercloud; + } + + @JsonProperty("dockercloud") + public void setDockercloudConfiguration(DockercloudConfiguration factory) { + this.dockercloud = factory; + } + + + @JsonProperty + public int getPodPort() { + return podPort; + } + + @JsonProperty + public void setPodPort(int podPort) { + this.podPort = podPort; + } +} diff --git a/src/main/java/com/o19s/grandcentral/ImageRegistry.java b/src/main/java/com/o19s/grandcentral/ImageRegistry.java new file mode 100644 
index 0000000..3ad676a --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/ImageRegistry.java @@ -0,0 +1,8 @@ +package com.o19s.grandcentral; + +public interface ImageRegistry { + + public boolean imageExistsInRegistry(String dockerTag) + throws Exception; + +} \ No newline at end of file diff --git a/src/main/java/com/o19s/grandcentral/LinkedContainerManager.java b/src/main/java/com/o19s/grandcentral/LinkedContainerManager.java new file mode 100644 index 0000000..fd34164 --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/LinkedContainerManager.java @@ -0,0 +1,29 @@ +package com.o19s.grandcentral; + +import java.io.IOException; + +import com.o19s.grandcentral.kubernetes.Pod; + +public interface LinkedContainerManager { + + /** + * Get pod information for the given name + * @param dockerTag Git hash / name of the pod to return + * @return The pod which matches the given key. + */ + public abstract Pod get(String dockerTag) throws IOException; + + /** + * Does the provided dockerTag currently exist within the cluster + * @param dockerTag Git hash / name of the pod to check + * @return True if the pod exists + */ + public abstract Boolean contains(String dockerTag); + + /** + * Adds a pod with the docker tag + * @param dockerTag Git hash / name of the pod to deploy + */ + public abstract Pod add(String dockerTag) throws Exception; + +} \ No newline at end of file diff --git a/src/main/java/com/o19s/grandcentral/dockercloud/DockercloudConfiguration.java b/src/main/java/com/o19s/grandcentral/dockercloud/DockercloudConfiguration.java new file mode 100644 index 0000000..b43baa8 --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/dockercloud/DockercloudConfiguration.java @@ -0,0 +1,110 @@ +package com.o19s.grandcentral.dockercloud; + +import io.dropwizard.jackson.JsonSnakeCase; + +import javax.validation.constraints.NotNull; + +import org.hibernate.validator.constraints.NotEmpty; + +import com.fasterxml.jackson.annotation.JsonProperty; + + +@JsonSnakeCase 
+public class DockercloudConfiguration { + @NotNull + @NotEmpty + private String hostname; + + @NotNull + @NotEmpty + private String username; + + @NotNull + @NotEmpty + private String apikey; + + @NotNull + @NotEmpty + private String namespace; + + // this can be null + private String protocol; + + @NotNull + @NotEmpty + private String stackExistsTestImage; // We do a hacky test of a image with a version to see if it exists or not. what image? + + @NotNull + @NotEmpty + private String stackJsonPath; + + @JsonProperty + public String getHostname() { + return hostname; + } + + @JsonProperty + public void setHostname(String masterIp) { + this.hostname = masterIp; + } + + @JsonProperty + public String getUsername() { + return username; + } + + @JsonProperty + public String getProtocol() { + return (protocol == null) ? "https" : protocol; + } + + @JsonProperty + public void setProtocol(String protocol) { + this.protocol = protocol; + } + + @JsonProperty + public void setUsername(String username) { + this.username = username; + } + + @JsonProperty + public String getApikey() { + return apikey; + } + + @JsonProperty + public void setApikey(String apikey) { + this.apikey = apikey; + } + + @JsonProperty + public String getNamespace() { + return namespace; + } + + @JsonProperty + public void setNamespace(String namespace) { + this.namespace = namespace; + } + + @JsonProperty + public String getStackJsonPath() { + return stackJsonPath; + } + + @JsonProperty + public void setStackJsonPath(String stackJsonPath) { + this.stackJsonPath = stackJsonPath; + } + + @JsonProperty + public String getStackExistsTestImage() { + return stackExistsTestImage; + } + + @JsonProperty + public void setStackExistsTestImage(String stackExistsTestImage) { + this.stackExistsTestImage = stackExistsTestImage; + } +} diff --git a/src/main/java/com/o19s/grandcentral/dockercloud/DockercloudRegistry.java b/src/main/java/com/o19s/grandcentral/dockercloud/DockercloudRegistry.java new file mode 100644 index 
0000000..bdacf74 --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/dockercloud/DockercloudRegistry.java @@ -0,0 +1,207 @@ +package com.o19s.grandcentral.dockercloud; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.CompletableFuture; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpStatus; +import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.impl.auth.BasicScheme; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.o19s.grandcentral.ImageRegistry; + +public class DockercloudRegistry implements ImageRegistry { + private static final Logger LOGGER = LoggerFactory + .getLogger(DockercloudRegistry.class); + private DockercloudConfiguration dockercloudConfiguration; + private CloseableHttpClient httpClient; + private final JsonFactory jsonFactory = new JsonFactory(); + private final ObjectMapper jsonObjectMapper = new ObjectMapper(jsonFactory); + + public DockercloudRegistry(DockercloudConfiguration dockercloudConfiguration) { + this.dockercloudConfiguration = dockercloudConfiguration; + + httpClient = HttpClients.createDefault(); + } + + @Override + public boolean imageExistsInRegistry(String dockerTag) throws Exception { + + LOGGER.info("Checking if Docker tag exists in registry: " + dockerTag); + + boolean imageExists = false; + + String serviceName = "chk-" + dockerTag; + + String imageName = dockercloudConfiguration.getStackExistsTestImage() + ":" + 
dockerTag; + + final String podUUID = createValidityCheckService(serviceName, imageName); + + imageExists = startService(podUUID); + + + + CompletableFuture futureDelete = CompletableFuture.supplyAsync( + () -> { + boolean deleteSuccessful = false; + try { + + int sleepTime = 2000; + // Simulate long running task + do { + sleepTime = sleepTime * 2;// Double each iteration. + LOGGER.info("Sleeping for " + sleepTime + "ms before delete on pod: " + podUUID); + Thread.sleep(sleepTime); + deleteSuccessful = deleteService(podUUID); + LOGGER.info("Was delete successful for " + podUUID +": " + deleteSuccessful); + + } + while (!deleteSuccessful && sleepTime <= 64000); + + + } catch (InterruptedException e) { } + return deleteSuccessful; + }); + + return imageExists; + } + + private String createValidityCheckService(String serviceName, String imageName){ + String podUUID = null; + try { + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + String s = String.join("\n", "{", " \"name\": \"" + serviceName + + "\",", " \"image\": \"" + imageName + "\"" + + ",", " \"run_command\": \"ls\"" + , "}"); + + LOGGER.info("Checking for " + imageName + " via " + s); + baos.write(s.getBytes()); + + HttpPost serviceCreate = new HttpPost( + dockercloudConfiguration.getProtocol() + "://" + + dockercloudConfiguration.getHostname() + + "/api/app/v1/service/"); + serviceCreate.addHeader("accept", "application/json"); + serviceCreate.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration + .getUsername(), dockercloudConfiguration + .getApikey()), "UTF-8", false)); + + HttpEntity podJson = new ByteArrayEntity(baos.toByteArray()); + serviceCreate.setEntity(podJson); + + try (CloseableHttpResponse response = httpClient + .execute(serviceCreate)) { + int status = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + InputStream responseBody = entity.getContent(); + + JsonNode rootNode = 
jsonObjectMapper.readTree(responseBody); + JsonNode objectsNode = rootNode.get("objects"); + if (status == HttpStatus.SC_CREATED) { + LOGGER.info("Pod " + serviceName + ": Scheduled"); + podUUID = rootNode.get("uuid").asText(); + + } else if (status == HttpStatus.SC_CONFLICT) { + LOGGER.info("Pod " + serviceName + ": Already running"); + } else { + LOGGER.info("Pod " + serviceName + ": Not scheduled (" + + response.getStatusLine().toString() + ":" + rootNode+ ")"); + } + } catch (IOException ioe) { + LOGGER.error("Pod " + serviceName + ": Error scheduling pod", ioe); + } + } catch (IOException ioe) { + LOGGER.error("Pod " + serviceName + ": Error scheduling pod", ioe); + } + return podUUID; + } + + private boolean deleteService(String podUUID) { + boolean result = false; + HttpDelete serviceDelete = new HttpDelete( + dockercloudConfiguration.getProtocol() + "://" + + dockercloudConfiguration.getHostname() + + "/api/app/v1/service/" + podUUID + "/"); + serviceDelete.addHeader("accept", "application/json"); + serviceDelete.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration + .getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + + try (CloseableHttpResponse response = httpClient.execute(serviceDelete)) { + int status = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + InputStream responseBody = entity.getContent(); + + JsonNode rootNode = jsonObjectMapper.readTree(responseBody); + + if (status == HttpStatus.SC_ACCEPTED) { + LOGGER.info("Pod " + podUUID + ": Deleted"); + result = true; + + } + + } catch (IOException ioe) { + LOGGER.error("Pod " + podUUID + ": Error pod", ioe); + } + + return result; + + } + + private boolean startService(String podUUID) { + boolean result = false; + + HttpPost serviceStart = new HttpPost( + dockercloudConfiguration.getProtocol() + "://" + + dockercloudConfiguration.getHostname() + + "/api/app/v1/service/" + podUUID + "/start/"); + 
serviceStart.addHeader("accept", "application/json"); + serviceStart.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration + .getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + + try (CloseableHttpResponse response = httpClient.execute(serviceStart)) { + int status = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + InputStream responseBody = entity.getContent(); + + JsonNode rootNode = jsonObjectMapper.readTree(responseBody); + + if (status == HttpStatus.SC_ACCEPTED) { + LOGGER.info("Pod " + podUUID + ": Started"); + result = true; + + } + else if (status == HttpStatus.SC_BAD_REQUEST){ + LOGGER.info("Pod " + podUUID + ": attempted start, and image not found"); + result = false; + } + + } catch (IOException ioe) { + LOGGER.error("Pod " + podUUID + ": Error pod", ioe); + } + + return result; + + } + +} diff --git a/src/main/java/com/o19s/grandcentral/dockercloud/StackManager.java b/src/main/java/com/o19s/grandcentral/dockercloud/StackManager.java new file mode 100644 index 0000000..6617b6f --- /dev/null +++ b/src/main/java/com/o19s/grandcentral/dockercloud/StackManager.java @@ -0,0 +1,571 @@ +package com.o19s.grandcentral.dockercloud; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.apache.commons.lang3.StringUtils; +import org.apache.http.HttpEntity; +import org.apache.http.HttpStatus; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.methods.CloseableHttpResponse; +import 
org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.impl.auth.BasicScheme; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.joda.time.DateTime; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.o19s.grandcentral.LinkedContainerManager; +import com.o19s.grandcentral.http.HttpDelete; // IMPORTANT, allows DELETE requests with bodies +import com.o19s.grandcentral.kubernetes.Pod; + +/** + * Manages all stacks present within a namespace + * + * Currently reusing the K8N Pod object. + * Should be implementing some sort of interface along with PodManager! + * Using the word Stack and Pod interchangable right now! 
+ */ +public class StackManager implements LinkedContainerManager { + private static final Logger LOGGER = LoggerFactory.getLogger(StackManager.class); + + private long lastRefresh; + private static final Map<String, Pod> pods = new HashMap<>(); + + private DockercloudConfiguration dockercloudConfiguration; + + private long refreshIntervalInMs; + private int maximumPodCount; + + private CloseableHttpClient httpClient; + private HttpClientContext httpContext; + + private final JsonFactory jsonFactory = new JsonFactory(); + private final ObjectMapper jsonObjectMapper = new ObjectMapper(jsonFactory); + private final ObjectNode stackDefinition; + + + static final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(true); + static final Lock readLock = readWriteLock.readLock(); + static final Lock writeLock = readWriteLock.writeLock(); + + /** + * Instantiates a new manager with the specified settings. + * TODO think about refactoring StackManager to being "LinkedContainerManager" and abstract + * the specific HTTP calls away to a separate DockerCloudAPI so we could drop in a MockDockerCloudAPI. 
+ * + */ + public StackManager(DockercloudConfiguration dockercloudConfiguration, + + long refreshIntervalInMs, + int maximumPodCount) throws IOException { + lastRefresh = 0; + + this.dockercloudConfiguration = dockercloudConfiguration; + + this.refreshIntervalInMs = refreshIntervalInMs; + this.maximumPodCount = maximumPodCount; + + stackDefinition = jsonObjectMapper.createObjectNode(); + stackDefinition.setAll((ObjectNode)jsonObjectMapper.readTree(new File(dockercloudConfiguration.getStackJsonPath()))); + + LOGGER.info("Loaded Stack Definition: " + stackDefinition); + + try { + // Setup SSL and plain connection socket factories +/* SSLContext sslContext = null; SSLContexts.custom() + .loadTrustMaterial(new File(keystorePath), "changeit".toCharArray()) + .build(); + */ + + // LayeredConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(sslContext); + // PlainConnectionSocketFactory plainsf = PlainConnectionSocketFactory.getSocketFactory(); +/* + Registry r = RegistryBuilder.create() + .register("http", plainsf) + .register("https", sslsf) + .build(); + HttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(r); + */ + + // Build the HTTP Client + httpClient = HttpClients.createDefault(); +// httpClient = HttpClients.custom() + // .setConnectionManager(cm) + // .build(); + + // Configure K8S HTTP Context (Authentication) + httpContext = HttpClientContext.create(); + CredentialsProvider dockercloudCredentialsProvider = new BasicCredentialsProvider(); + dockercloudCredentialsProvider.setCredentials( + new AuthScope(dockercloudConfiguration.getHostname(), 80), + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey())); + httpContext.setCredentialsProvider(dockercloudCredentialsProvider); + + } catch (Exception e) { + LOGGER.error("Error configuring HTTP clients", e); + } + + // Initial loading of pod information + refreshPods(); + } + + /** + * Get pod information for the given name + * 
@param dockerTag Git hash / name of the pod to return + * @return The pod which matches the given key. + */ + public Pod get(String dockerTag) throws IOException { + Pod pod = null; + + // Force a refresh of the data from K8S if the interval has passed + if (DateTime.now().getMillis() - lastRefresh > refreshIntervalInMs) { + try { + refreshPods(); + } catch (Exception e) { + throw new IOException(e); + } + } + + + if (contains(dockerTag)) { + readLock.lock(); + try { + pod = pods.get(dockerTag); + } finally { + readLock.unlock(); + } + } + + return pod; + } + + /** + * Does the provided dockerTag currently exist within the cluster + * @param dockerTag Git hash / name of the pod to check + * @return True if the pod exists + */ + public Boolean contains(String dockerTag) { + readLock.lock(); + boolean contains = false; + + try { + contains = pods.containsKey(dockerTag); + } finally { + readLock.unlock(); + } + + return contains; + } + + /** + * Adds a pod with the docker tag + * @param dockerTag Git hash / name of the pod to deploy + */ + public Pod add(String dockerTag) throws Exception { + if (!contains(dockerTag)) { + Pod pod = null; + + // Get the read lock + readLock.lock(); + + try { + // Schedule the new Stack + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + JsonGenerator generator = jsonFactory.createGenerator(baos); + + ObjectNode newStackDefinition = stackDefinition.deepCopy(); + newStackDefinition.put("name", dockercloudConfiguration.getNamespace() + "-" + dockerTag.replace(".", "-")); + + String image; + for (JsonNode serviceNode : newStackDefinition.get("services")) { + image = serviceNode.get("image").asText(); + if (image.endsWith("__DOCKER_TAG__")) { + ((ObjectNode) serviceNode).put("image", image.replace("__DOCKER_TAG__", dockerTag)); + } + } + + LOGGER.info("Generated definition for \"" + dockerTag + "\": " + newStackDefinition); + + generator.writeObject(newStackDefinition); + generator.flush(); + generator.close(); + + HttpPost 
stackCreate = new HttpPost(dockercloudConfiguration.getProtocol() + "://" + dockercloudConfiguration.getHostname() + "/api/app/v1/stack/"); + stackCreate.addHeader("accept", "application/json"); + stackCreate.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + + + HttpEntity podJson = new ByteArrayEntity(baos.toByteArray()); + stackCreate.setEntity(podJson); + + boolean podCreated = false; + String podUUID = null; + try (CloseableHttpResponse response = httpClient.execute(stackCreate)) { + int status = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + InputStream responseBody = entity.getContent(); + + if (status == HttpStatus.SC_CREATED) { + LOGGER.info("Pod " + dockerTag + ": Scheduled"); + + JsonNode rootNode = jsonObjectMapper.readTree(responseBody); + JsonNode objectsNode = rootNode.get("objects"); + podUUID = rootNode.get("uuid").asText(); + podCreated = true; + + } else if (status == HttpStatus.SC_CONFLICT) { + LOGGER.info("Pod " + dockerTag + ": Already running"); + } else if (status == HttpStatus.SC_BAD_REQUEST) { + LOGGER.info("Pod " + dockerTag + ": Couldn't create stack. Check definition: " + newStackDefinition); + } else { + LOGGER.info("Pod " + dockerTag + ": Not scheduled (" + response.getStatusLine().toString() + ")"); + } + } catch (IOException ioe) { + LOGGER.error("Pod " + dockerTag + ": Error scheduling pod", ioe); + } + + + + + // Here should be a check to see if it the stack was created, not sure how long it takes!!! 
+ + + // Start the stack if it was created + if (podCreated && podUUID != null){ + // Start the stack + HttpPost stackStart = new HttpPost(dockercloudConfiguration.getProtocol() + "://" + dockercloudConfiguration.getHostname() + "/api/app/v1/stack/" + podUUID + "/start/"); + stackStart.addHeader("accept", "application/json"); + stackStart.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + + try (CloseableHttpResponse response = httpClient.execute(stackStart)) { + int status = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + InputStream responseBody = entity.getContent(); + JsonNode rootNode = jsonObjectMapper.readTree(responseBody); + } + catch (IOException ioe) { + LOGGER.error("Pod " + dockerTag + ": Error scheduling pod", ioe); + } + + + } + + // Wait until Pod is running + + if (podCreated){ + boolean podRunning = false; + int numberOfSecondsWaiting = 0; + HttpGet stackStatus = new HttpGet(dockercloudConfiguration.getProtocol() + "://" + dockercloudConfiguration.getHostname() + "/api/app/v1/stack/" + podUUID); + stackStatus.addHeader("accept", "application/json"); + stackStatus.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + + do { + numberOfSecondsWaiting++; + LOGGER.info("Pod " + dockerTag + ": Waiting for start. 
Seconds:" + numberOfSecondsWaiting); + Thread.sleep(1000); + + try (CloseableHttpResponse response = httpClient.execute(stackStatus)) { + HttpEntity entity = response.getEntity(); + try (InputStream responseBody = entity.getContent()) { + + + JsonNode rootNode = jsonObjectMapper.readTree(responseBody); + String name = rootNode.get("name").asText(); + String status = rootNode.get("state").asText(); + JsonNode servicesNode = rootNode.get("services"); + List<String> servicesURI = new ArrayList<>(); + for (JsonNode node : servicesNode){ + servicesURI.add(node.asText()); + } + + + String publicDNS = getTargetDNSForStack(dockercloudConfiguration.getStackExistsTestImage(),servicesURI); + + pod = new Pod(dockerTag, publicDNS, status); + pod.setUuid(podUUID); + + podRunning = pod != null && pod.isRunning(); + } catch (IOException ioe) { + LOGGER.error("Pod " + dockerTag + ": Error getting DNS for pod", ioe); + } + } + } while (!podRunning && numberOfSecondsWaiting < 120); + + if (podRunning){ + LOGGER.info("Pod " + dockerTag + ": Started"); + } + else { + LOGGER.error("Pod " + dockerTag + ": Timed out checking for start"); + } + } + } finally { + readLock.unlock(); + } + + // Force a refresh of the pod list + refreshPods(); + + return pod; + } + + return null; + } + + /** + * Stops the pod containing the specified docker tag. 
Note this does not force a refresh of the pods state + * @param dockerTag + * @throws IOException + */ + public void remove(String dockerTag, String stackUUID) throws IOException { + if (contains(dockerTag)) { + readLock.lock(); + + try { + + // Start the stack + HttpDelete stackDelete = new HttpDelete(dockercloudConfiguration.getProtocol() + "://" + dockercloudConfiguration.getHostname() + "/api/app/v1/stack/" + stackUUID +"/"); + stackDelete.addHeader("accept", "application/json"); + stackDelete.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + + + try (CloseableHttpResponse response = httpClient.execute(stackDelete)) { + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_ACCEPTED) { + LOGGER.info("Stack " + dockerTag + ": Removed"); + Thread.sleep(10000); + } else { + LOGGER.info("Stack " + dockerTag + ": Error removing stack (" + response.getStatusLine().toString() + ")"); + } + } catch (IOException ioe) { + LOGGER.error("Stack " + dockerTag + ": Error removing stack", ioe); + } catch (InterruptedException e) { + LOGGER.error("Stack " + dockerTag + ": Error sleeping"); + } + } finally { + readLock.unlock(); + } + } else { + throw new IllegalArgumentException("Stack doesn't exist"); + } + } + + /** + * Removes the oldest running pods until maximumPodCount is reached + * @throws IOException + */ + private void removeExtraPods() throws IOException { + readLock.lock(); + if (pods.size() > maximumPodCount) { + readLock.unlock(); + writeLock.lock(); + + try { + // Check again since there was a time where we didn't have the lock + if (pods.size() > maximumPodCount) { + LOGGER.info("Removing extra pods"); + + // Determine the pods to remove + Pod[] sortedPodsByRequestAge = null; + sortedPodsByRequestAge = pods.values().toArray(new Pod[pods.size()]); + Arrays.sort( + sortedPodsByRequestAge, + (Pod left, Pod right) -> { + if 
(left.getLastRequest() > right.getLastRequest()) + return 1; + else if (left.getLastRequest() < right.getLastRequest()) + return -1; + else + return 0; + } + ); + + // Remove the pods + int amountToRemove = sortedPodsByRequestAge.length - maximumPodCount; + for (int i = 0; i < amountToRemove; i++) { + remove(sortedPodsByRequestAge[i].getDockerTag(), sortedPodsByRequestAge[i].getUuid()); + } + } + } finally { + writeLock.unlock(); + } + + refreshPods(); + } else { + readLock.unlock(); + } + } + + /** + * Refreshes the internal map which tracks all running pods + * @throws Exception + */ + private void refreshPods() throws IOException { + + HttpGet stacksGet = new HttpGet(dockercloudConfiguration.getProtocol() + "://" + dockercloudConfiguration.getHostname() + "/api/app/v1/stack/"); + stacksGet.addHeader("accept", "application/json"); + stacksGet.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + +// try (CloseableHttpResponse response = httpClient.execute(stacksGet, httpContext)) { + try (CloseableHttpResponse response = httpClient.execute(stacksGet)) { + + HttpEntity entity = response.getEntity(); + if (entity != null) { + // Grab the write lock + writeLock.lock(); + + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { + LOGGER.info("Stack status query suceeded"); + } else { + LOGGER.error("Stack status query failed. 
(" + response.getStatusLine().toString() + ")"); + } + + try (InputStream responseBody = entity.getContent()) { + + JsonNode rootNode = jsonObjectMapper.readTree(responseBody); + JsonNode objectsNode = rootNode.get("objects"); + + // Update our internal pod hash + Set toDelete = new HashSet<>(pods.size()); + toDelete.addAll(pods.keySet()); + for (int i = 0; i < objectsNode.size(); i++) { + JsonNode serviceNode = objectsNode.get(i); + Pod pod = null; + String name = serviceNode.get("name").asText(); + + LOGGER.info("Refresh: Checking if stack " + name + " is jumping aboard the Grand Central Express!"); + + String dockerTag = null; + String podName = name; + String podUUID = serviceNode.get("uuid").asText(); + String state = serviceNode.get("state").asText(); + + if (name.indexOf("-")> -1){ + String[] namebits = name.split("-"); + if (namebits.length==2){ + dockerTag = namebits[1]; + } + else { + dockerTag = namebits[1] + "." + namebits[2]; + } + } + + List servicesURI2 = new ArrayList(); + for (JsonNode node : serviceNode.get("services")){ + servicesURI2.add(node.asText()); + } + + + + if (dockerTag != null && !servicesURI2.isEmpty() && podName.contains(dockercloudConfiguration.getNamespace())){ + + String publicDNS = getTargetDNSForStack(dockercloudConfiguration.getStackExistsTestImage(), servicesURI2); + + pod = new Pod(dockerTag, publicDNS, state); + pod.setUuid(podUUID); + } + + + + if (pod != null && pod.isRunning()) { + // The pod is valid and should be managed + if (pods.containsKey(pod.getDockerTag())) { + LOGGER.info("Refresh: Updating pod " + pod.getDockerTag() + " in internal hash"); + + // Update the pod's address + pods.get(pod.getDockerTag()).setAddress(pod.getAddress()); + + // Remove the pending delete task for pods that exist + toDelete.remove(pod.getDockerTag()); + } else { + LOGGER.info("Refresh: Adding pod " + pod.getDockerTag() + " to internal hash"); + pods.put(pod.getDockerTag(), pod); + } + } + } + + // Delete pods that have been removed 
(delete refers to our hash, not k8s). This calls remove on the hash, not the manager. + toDelete.forEach((dockerTag) -> LOGGER.info("Refresh: Removing pod " + dockerTag + " from internal hash")); + toDelete.forEach(pods::remove); + } catch (IOException ioe) { + LOGGER.error("Pod Refresh: Error parsing pods", ioe); + } finally { + writeLock.unlock(); + } + } + } catch (IOException ioe) { + LOGGER.error("Pod Refresh: Error retrieving pods", ioe); + } + + // Cleanup old pods + removeExtraPods(); + + // Update the lastRefresh time + lastRefresh = DateTime.now().getMillis(); + } + + private String getTargetDNSForStack(String targetImage, List<String> servicesURI) throws IOException{ + + for (String serviceURI : servicesURI){ + HttpGet stackServices = new HttpGet(dockercloudConfiguration.getProtocol() + "://" + dockercloudConfiguration.getHostname() + serviceURI); + stackServices.addHeader("accept", "application/json"); + stackServices.addHeader(BasicScheme.authenticate( + new UsernamePasswordCredentials(dockercloudConfiguration.getUsername(), dockercloudConfiguration.getApikey()), + "UTF-8", false)); + try (CloseableHttpResponse response2 = httpClient.execute(stackServices)) { + HttpEntity entity2 = response2.getEntity(); + try (InputStream responseBody2 = entity2.getContent()) { + + + JsonNode rootNode2 = jsonObjectMapper.readTree(responseBody2); + if (rootNode2.get("image_name").asText().contains(targetImage)){ + String publicDNS = rootNode2.get("public_dns").asText(); + return publicDNS; + } + + } catch (IOException ioe) { + LOGGER.error("Pod at " + serviceURI + ": Error reading service details", ioe); + } + + + } catch (IOException ioe) { + LOGGER.error("Pod " + serviceURI + ": Error getting public dns for pod", ioe); + } + } + + throw new IOException("No public DNS found for image " + targetImage + " among stack services"); + + } +} diff --git a/src/main/java/com/o19s/grandcentral/gcloud/GCloudConfiguration.java b/src/main/java/com/o19s/grandcentral/gcloud/GCloudConfiguration.java index f96ccb5..6a718b7 100644 --- 
a/src/main/java/com/o19s/grandcentral/gcloud/GCloudConfiguration.java +++ b/src/main/java/com/o19s/grandcentral/gcloud/GCloudConfiguration.java @@ -1,8 +1,9 @@ package com.o19s.grandcentral.gcloud; -import com.fasterxml.jackson.annotation.JsonProperty; import io.dropwizard.jackson.JsonSnakeCase; +import com.fasterxml.jackson.annotation.JsonProperty; + @JsonSnakeCase public class GCloudConfiguration { private String registryDomain; diff --git a/src/main/java/com/o19s/grandcentral/gcloud/GCloudRegistry.java b/src/main/java/com/o19s/grandcentral/gcloud/GCloudRegistry.java index 5d0d82b..5a79c0b 100644 --- a/src/main/java/com/o19s/grandcentral/gcloud/GCloudRegistry.java +++ b/src/main/java/com/o19s/grandcentral/gcloud/GCloudRegistry.java @@ -1,5 +1,11 @@ package com.o19s.grandcentral.gcloud; +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +import javax.net.ssl.SSLContext; + import org.apache.http.HttpStatus; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; @@ -20,15 +26,12 @@ import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.ssl.SSLContexts; -import javax.net.ssl.SSLContext; -import java.io.File; -import java.nio.charset.StandardCharsets; -import java.util.Base64; +import com.o19s.grandcentral.ImageRegistry; /** * Created by cbradford on 1/18/16. 
*/ -public class GCloudRegistry { +public class GCloudRegistry implements ImageRegistry { private CloseableHttpClient httpClient; private HttpClientContext httpContext; private GCloudConfiguration config; @@ -68,7 +71,11 @@ public GCloudRegistry(GCloudConfiguration config, String keystorePath) throws Ex httpContext.setCredentialsProvider(gcloudCredentialsProvider); } - public boolean imageExistsInRegistry(String dockerTag) throws Exception { + /* (non-Javadoc) + * @see com.o19s.grandcentral.gcloud.ImageRegistry#imageExistsInRegistry(java.lang.String) + */ +@Override +public boolean imageExistsInRegistry(String dockerTag) throws Exception { // Verify the image is available from GCR.io HttpGet verificationGet = new HttpGet("https://" + config.getRegistryDomain() + ":443/v2/" + config.getProject() + "/" + config.getContainerName() + "/manifests/" + dockerTag); diff --git a/src/main/java/com/o19s/grandcentral/healthchecks/ContainerRegistryHealthCheck.java b/src/main/java/com/o19s/grandcentral/healthchecks/ContainerRegistryHealthCheck.java index cc923bf..827bf7c 100644 --- a/src/main/java/com/o19s/grandcentral/healthchecks/ContainerRegistryHealthCheck.java +++ b/src/main/java/com/o19s/grandcentral/healthchecks/ContainerRegistryHealthCheck.java @@ -1,6 +1,12 @@ package com.o19s.grandcentral.healthchecks; -import com.codahale.metrics.health.HealthCheck; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +import javax.net.ssl.SSLContext; + import org.apache.http.HttpStatus; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; @@ -23,11 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.net.ssl.SSLContext; -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Base64; +import com.codahale.metrics.health.HealthCheck; public class ContainerRegistryHealthCheck extends HealthCheck 
{ private static final Logger LOGGER = LoggerFactory.getLogger(ContainerRegistryHealthCheck.class); diff --git a/src/main/java/com/o19s/grandcentral/healthchecks/KubernetesMasterHealthCheck.java b/src/main/java/com/o19s/grandcentral/healthchecks/KubernetesMasterHealthCheck.java index 883700a..752ebed 100644 --- a/src/main/java/com/o19s/grandcentral/healthchecks/KubernetesMasterHealthCheck.java +++ b/src/main/java/com/o19s/grandcentral/healthchecks/KubernetesMasterHealthCheck.java @@ -1,6 +1,10 @@ package com.o19s.grandcentral.healthchecks; -import com.codahale.metrics.health.HealthCheck; +import java.io.File; +import java.io.IOException; + +import javax.net.ssl.SSLContext; + import org.apache.http.HttpStatus; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; @@ -23,9 +27,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.net.ssl.SSLContext; -import java.io.File; -import java.io.IOException; +import com.codahale.metrics.health.HealthCheck; public class KubernetesMasterHealthCheck extends HealthCheck { private static final Logger LOGGER = LoggerFactory.getLogger(KubernetesMasterHealthCheck.class); diff --git a/src/main/java/com/o19s/grandcentral/http/HttpDelete.java b/src/main/java/com/o19s/grandcentral/http/HttpDelete.java index 9301640..93b8406 100644 --- a/src/main/java/com/o19s/grandcentral/http/HttpDelete.java +++ b/src/main/java/com/o19s/grandcentral/http/HttpDelete.java @@ -1,9 +1,9 @@ package com.o19s.grandcentral.http; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - import java.net.URI; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; + /** * HTTP Delete request with a request body */ diff --git a/src/main/java/com/o19s/grandcentral/kubernetes/KubernetesConfiguration.java b/src/main/java/com/o19s/grandcentral/kubernetes/KubernetesConfiguration.java index c7a2cfb..afb09df 100644 --- 
a/src/main/java/com/o19s/grandcentral/kubernetes/KubernetesConfiguration.java +++ b/src/main/java/com/o19s/grandcentral/kubernetes/KubernetesConfiguration.java @@ -1,13 +1,13 @@ package com.o19s.grandcentral.kubernetes; -import com.fasterxml.jackson.annotation.JsonProperty; import io.dropwizard.jackson.JsonSnakeCase; -import io.dropwizard.setup.Environment; -import org.apache.http.client.HttpClient; -import org.hibernate.validator.constraints.NotEmpty; import javax.validation.constraints.NotNull; +import org.hibernate.validator.constraints.NotEmpty; + +import com.fasterxml.jackson.annotation.JsonProperty; + @JsonSnakeCase public class KubernetesConfiguration { @@ -27,6 +27,9 @@ public class KubernetesConfiguration { @NotEmpty private String namespace; + // this can be null + private String protocol; + @JsonProperty public String getMasterIp() { return masterIp; @@ -42,6 +45,16 @@ public String getUsername() { return username; } + @JsonProperty + public String getProtocol() { + return (protocol == null) ? "https" : protocol; + } + + @JsonProperty + public void setProtocol(String protocol) { + this.protocol = protocol; + } + @JsonProperty public void setUsername(String username) { this.username = username; diff --git a/src/main/java/com/o19s/grandcentral/kubernetes/Pod.java b/src/main/java/com/o19s/grandcentral/kubernetes/Pod.java index ce8accf..8f69236 100644 --- a/src/main/java/com/o19s/grandcentral/kubernetes/Pod.java +++ b/src/main/java/com/o19s/grandcentral/kubernetes/Pod.java @@ -1,9 +1,9 @@ package com.o19s.grandcentral.kubernetes; -import org.joda.time.DateTime; - import java.util.concurrent.atomic.AtomicLong; +import org.joda.time.DateTime; + /** * Represents a Kubernetes pod. */ @@ -12,6 +12,7 @@ public class Pod { private String address; private String status; private AtomicLong lastRequest; + private String uuid; // Needed for Dockercloud, we pass UUID around. 
/** * Creates a new Pod @@ -51,8 +52,10 @@ public void setAddress(String address) { public String getStatus() { return status; } + //FIXME the Partly running is a Dockercloud thing, we have many bits that may or may not ALL be running. + // For example, a init script... public boolean isRunning() { - return status != null && status.equals("Running"); + return status != null && (status.equals("Running") || status.equals("Partly running")); } public long getLastRequest() { @@ -62,4 +65,12 @@ public long getLastRequest() { public void setLastRequest(long requestedAt) { this.lastRequest.set(requestedAt); } + +public String getUuid() { + return uuid; +} + +public void setUuid(String uuid) { + this.uuid = uuid; +} } diff --git a/src/main/java/com/o19s/grandcentral/kubernetes/PodManager.java b/src/main/java/com/o19s/grandcentral/kubernetes/PodManager.java index fa3f567..8239df5 100644 --- a/src/main/java/com/o19s/grandcentral/kubernetes/PodManager.java +++ b/src/main/java/com/o19s/grandcentral/kubernetes/PodManager.java @@ -1,18 +1,25 @@ package com.o19s.grandcentral.kubernetes; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import javax.net.ssl.SSLContext; + import org.apache.http.HttpEntity; import org.apache.http.HttpStatus; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import 
org.apache.http.client.CredentialsProvider; import org.apache.http.client.methods.CloseableHttpResponse; -import com.o19s.grandcentral.http.HttpDelete; // IMPORTANT, allows DELETE requests with bodies import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.protocol.HttpClientContext; @@ -33,19 +40,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.net.ssl.SSLContext; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.util.*; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.o19s.grandcentral.LinkedContainerManager; +import com.o19s.grandcentral.http.HttpDelete; // IMPORTANT, allows DELETE requests with bodies /** * Manages all pods present within a namespace */ -public class PodManager { +public class PodManager implements LinkedContainerManager { private static final Logger LOGGER = LoggerFactory.getLogger(PodManager.class); private long lastRefresh; @@ -75,6 +82,7 @@ public class PodManager { * @param keystorePath Path to the Java Keystore containing trusted certificates * @param maximumPodCount Maximum number of pods to ever have running at once * @param refreshIntervalInMs Interval with which to refresh the pods + * @param podYamlPath the location of the yaml config for the application pod */ public PodManager(KubernetesConfiguration k8sConfiguration, String keystorePath, @@ -127,12 +135,11 @@ public PodManager(KubernetesConfiguration k8sConfiguration, refreshPods(); } - /** - * Get pod information for the given name - 
* @param dockerTag Git hash / name of the pod to return - * @return The pod which matches the given key. - */ - public Pod get(String dockerTag) throws IOException { + /* (non-Javadoc) + * @see com.o19s.grandcentral.kubernetes.StuffManagerInterfaceNeedBetterName#get(java.lang.String) + */ + @Override +public Pod get(String dockerTag) throws IOException { Pod pod = null; // Force a refresh of the data from K8S if the interval has passed @@ -153,12 +160,11 @@ public Pod get(String dockerTag) throws IOException { return pod; } - /** - * Does the provided dockerTag currently exist within the cluster - * @param dockerTag Git hash / name of the pod to check - * @return True if the pod exists - */ - public Boolean contains(String dockerTag) { + /* (non-Javadoc) + * @see com.o19s.grandcentral.kubernetes.StuffManagerInterfaceNeedBetterName#contains(java.lang.String) + */ + @Override +public Boolean contains(String dockerTag) { readLock.lock(); boolean contains = false; @@ -171,11 +177,11 @@ public Boolean contains(String dockerTag) { return contains; } - /** - * Adds a pod with the docker tag - * @param dockerTag Git hash / name of the pod to deploy - */ - public Pod add(String dockerTag) throws Exception { + /* (non-Javadoc) + * @see com.o19s.grandcentral.kubernetes.StuffManagerInterfaceNeedBetterName#add(java.lang.String) + */ + @Override +public Pod add(String dockerTag) throws Exception { if (!contains(dockerTag)) { Pod pod = null; @@ -343,7 +349,7 @@ else if (left.getLastRequest() < right.getLastRequest()) * @throws IOException */ private void refreshPods() throws IOException { - HttpGet podsGet = new HttpGet("https://" + k8sConfiguration.getMasterIp() + ":443/api/v1/namespaces/" + k8sConfiguration.getNamespace() + "/pods"); + HttpGet podsGet = new HttpGet(k8sConfiguration.getProtocol() + "://" + k8sConfiguration.getMasterIp() + "/api/v1/namespaces/" + k8sConfiguration.getNamespace() + "/pods"); try (CloseableHttpResponse response = httpClient.execute(podsGet, 
httpContext)) { HttpEntity entity = response.getEntity(); diff --git a/src/main/java/com/o19s/grandcentral/servlets/PodProxyServlet.java b/src/main/java/com/o19s/grandcentral/servlets/PodProxyServlet.java index f5dba8c..bfac7ed 100644 --- a/src/main/java/com/o19s/grandcentral/servlets/PodProxyServlet.java +++ b/src/main/java/com/o19s/grandcentral/servlets/PodProxyServlet.java @@ -1,13 +1,14 @@ package com.o19s.grandcentral.servlets; -import org.eclipse.jetty.proxy.ProxyServlet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.net.URI; import javax.servlet.ServletConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; -import java.net.URI; + +import org.eclipse.jetty.proxy.ProxyServlet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Proxies requests to Pods running within the Kubernetes cluster. diff --git a/src/main/java/com/o19s/grandcentral/servlets/PodServletFilter.java b/src/main/java/com/o19s/grandcentral/servlets/PodServletFilter.java index 0cea849..a347969 100644 --- a/src/main/java/com/o19s/grandcentral/servlets/PodServletFilter.java +++ b/src/main/java/com/o19s/grandcentral/servlets/PodServletFilter.java @@ -1,16 +1,22 @@ package com.o19s.grandcentral.servlets; -import com.o19s.grandcentral.gcloud.GCloudRegistry; -import com.o19s.grandcentral.kubernetes.Pod; -import com.o19s.grandcentral.kubernetes.PodManager; +import java.io.IOException; + +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + import org.eclipse.jetty.http.HttpStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.servlet.*; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; +import 
com.o19s.grandcentral.ImageRegistry; +import com.o19s.grandcentral.LinkedContainerManager; +import com.o19s.grandcentral.kubernetes.Pod; /** * Filter which drops requests that do not match the appropriate host header format. @@ -19,15 +25,15 @@ public class PodServletFilter implements javax.servlet.Filter { private static final Logger LOGGER = LoggerFactory.getLogger(PodServletFilter.class); private String grandCentralDomain; - private PodManager podManager; - private GCloudRegistry gCloudRegistry; + private LinkedContainerManager podManager; + private ImageRegistry gCloudRegistry; /** * * @param grandCentralDomain The domain grand central is running on. This helps determine the portion of the URL representing the Git hash. * @param podManager */ - public PodServletFilter(String grandCentralDomain, PodManager podManager, GCloudRegistry gCloudRegistry) { + public PodServletFilter(String grandCentralDomain, LinkedContainerManager podManager, ImageRegistry gCloudRegistry) { this.grandCentralDomain = grandCentralDomain; this.podManager = podManager; this.gCloudRegistry = gCloudRegistry; @@ -50,7 +56,7 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo String host = request.getHeader("Host"); String hostWithoutPort, dockerTag; - + // FIXME: if the host is null, should GC blow up? Can you have a null host? 
if (host != null && host.contains(":")) { hostWithoutPort = host.substring(0, host.indexOf(":")); } else { @@ -86,7 +92,8 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo } } } else { - return_error(servletResponse, HttpStatus.BAD_REQUEST_400, "Host Header was not specified or is invalid"); + LOGGER.info("Host:" + host + ", dockerTag:" + dockerTag + "," + "grandCentralDomain:" + this.grandCentralDomain); + return_error(servletResponse, HttpStatus.BAD_REQUEST_400, "Host Header was not specified or is invalid:" +"Host:" + host + ", dockerTag:" + dockerTag + "," + "grandCentralDomain:" + this.grandCentralDomain); } } catch (Exception e) { LOGGER.error("Exception filtering request", e); diff --git a/src/test/java/com/o19s/grandcentral/dockercloud/DockercloudRegistryTest.java b/src/test/java/com/o19s/grandcentral/dockercloud/DockercloudRegistryTest.java new file mode 100644 index 0000000..ab90c00 --- /dev/null +++ b/src/test/java/com/o19s/grandcentral/dockercloud/DockercloudRegistryTest.java @@ -0,0 +1,32 @@ +package com.o19s.grandcentral.dockercloud; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class DockercloudRegistryTest { + + + @Test +// @Ignore + public void testCheck() throws Exception { + + DockercloudConfiguration dockercloudConfig = new DockercloudConfiguration(); + dockercloudConfig.setProtocol("https"); + dockercloudConfig.setHostname("cloud.docker.com"); + dockercloudConfig.setNamespace("datastart"); + dockercloudConfig.setUsername("dep4b"); + dockercloudConfig.setApikey("YOUR_API_KEY"); + dockercloudConfig.setStackExistsTestImage("dep4b/datastart"); + + + + DockercloudRegistry dockercloudRegistry = new DockercloudRegistry(dockercloudConfig); + + assertTrue(dockercloudRegistry.imageExistsInRegistry("v1")); + assertFalse(dockercloudRegistry.imageExistsInRegistry("v2")); + + } + +} diff --git 
a/src/test/java/com/o19s/grandcentral/dockercloud/StackManagerTest.java b/src/test/java/com/o19s/grandcentral/dockercloud/StackManagerTest.java new file mode 100644 index 0000000..3e01e5e --- /dev/null +++ b/src/test/java/com/o19s/grandcentral/dockercloud/StackManagerTest.java @@ -0,0 +1,46 @@ +package com.o19s.grandcentral.dockercloud; + +import static org.junit.Assert.*; + +import org.junit.Before; +import org.junit.Test; + +import com.o19s.grandcentral.kubernetes.Pod; + +public class StackManagerTest { + + private DockercloudConfiguration dockercloudConfig; + @Before + public void setUp() throws Exception { + dockercloudConfig = new DockercloudConfiguration(); + dockercloudConfig.setProtocol("https"); + dockercloudConfig.setHostname("cloud.docker.com"); + dockercloudConfig.setNamespace("gctest"); + dockercloudConfig.setUsername("dep4b"); + dockercloudConfig.setApikey("YOUR_API_KEY"); + dockercloudConfig.setStackJsonPath("./src/test/resources/docker-cloud.json"); + dockercloudConfig.setStackExistsTestImage("mysql"); + + } + + + @Test + public void testAddRemoveLifycycle() throws Exception{ + StackManager stackManager = new StackManager(dockercloudConfig,10000,2); + + Pod pod = stackManager.add("bogus_tag"); + assertNull(pod); + + pod = stackManager.add("latest"); + assertEquals("latest", pod.getDockerTag()); + assertTrue(pod.isRunning()); + + stackManager.remove(pod.getDockerTag(), pod.getUuid()); + + pod = stackManager.add("5.5"); + assertEquals("5.5", pod.getDockerTag()); + assertTrue(pod.isRunning()); + stackManager.remove(pod.getDockerTag(), pod.getUuid()); + } + +} diff --git a/src/test/java/com/o19s/grandcentral/kubernetes/PodManagerTest.java b/src/test/java/com/o19s/grandcentral/kubernetes/PodManagerTest.java new file mode 100644 index 0000000..6914f39 --- /dev/null +++ b/src/test/java/com/o19s/grandcentral/kubernetes/PodManagerTest.java @@ -0,0 +1,82 @@ +package com.o19s.grandcentral.kubernetes; +import static 
com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.get; +import static com.github.tomakehurst.wiremock.client.WireMock.stubFor; +import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import com.github.tomakehurst.wiremock.junit.WireMockRule; +import com.o19s.grandcentral.LinkedContainerManager; + +; +/** + * Created by Omnifroodle on 5/17/16. + */ +public class PodManagerTest { + private KubernetesConfiguration kubecfg; + @Rule + public WireMockRule wireMockRule = new WireMockRule(8888); + + @Before + public void setUp() throws Exception { + this.kubecfg = new KubernetesConfiguration(); + kubecfg.setMasterIp("127.0.0.1:8888"); + kubecfg.setProtocol("http"); + kubecfg.setNamespace("test"); + kubecfg.setUsername("fred"); + kubecfg.setPassword("flintstone"); + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void testGet() throws Exception { + + } + + @Test + public void testContains() throws Exception { + + } + + @Test + public void testAdd() throws Exception { + + } + + @Test + public void testRefreshPods() throws Exception { + stubFor(get(urlEqualTo("/api/v1/namespaces/test/pods")) + .willReturn(aResponse() + .withStatus(200) + .withHeader("Content-Type", "text/json") + .withBody("{\"items\":[" + + " {" + + " \"metadata\":{" + + " \"name\": \"abc\"" + + " }," + + " \"status\": {" + + " \"phase\": \"Running\"," + + " \"podIP\": \"1.1.1.1\"" + + " }" + + " }," + + " {" + + " \"metadata\":{" + + " \"name\": \"abcd\"" + + " }," + + " \"status\": {" + + " \"phase\": \"Running\"," + + " \"podIP\": \"1.1.1.2\"" + + " }" + + " }" + + " ]}"))); + + LinkedContainerManager manager = new PodManager(this.kubecfg, "config/grandcentral.jks", 100, 1, "./config/configuration.yml"); + } +} \ No newline at end of file diff --git 
a/src/test/resources/docker-cloud.json b/src/test/resources/docker-cloud.json new file mode 100644 index 0000000..276eeab --- /dev/null +++ b/src/test/resources/docker-cloud.json @@ -0,0 +1,10 @@ +{ + "name": "apache", + "services": [ + { + "name":"apache", + "image": "eboraas/apache:__DOCKER_TAG__", + "ports": ["80:80"] + } + ] +} diff --git a/src/test/resources/local-dockercloud.yml b/src/test/resources/local-dockercloud.yml new file mode 100644 index 0000000..02ea25a --- /dev/null +++ b/src/test/resources/local-dockercloud.yml @@ -0,0 +1,23 @@ +janitor_cleanup_threshold: 3600 +maximum_stack_count: 1 +grandcentral_domain: apache.grandcentral.com +refresh_interval_in_ms: 300000 + +pod_port: 80 + +dockercloud: + protocol: https + hostname: cloud.docker.com + namespace: apache + stack_json_path: src/test/resources/docker-cloud.json + stack_exists_test_image: eboraas/apache + username: ${DOCKERCLOUD_USERNAME} + apikey: ${DOCKERCLOUD_APIKEY} + +server: + type: simple + applicationContextPath: / + adminContextPath: /admin + connector: + type: http + port: 8080