diff --git a/database/influx/influx.js b/database/influx/influx.js
new file mode 100644
index 0000000..3833c67
--- /dev/null
+++ b/database/influx/influx.js
@@ -0,0 +1,60 @@
+const Influx = require("influx");
+const logger = require("./../../lib/log")(__filename);
+require("dotenv").config();
+
+const influxModule = {};
+let client;
+const HOST = process.env.INF_HOST||"localhost";
+const PORT = process.env.INF_PORT||8086;
+influxModule.startInflux = () => {
+ client = new Influx.InfluxDB({
+ host: HOST,
+ port: PORT,
+ });
+ logger.info("Sucessfully started InfluxDB");
+};
+
+influxModule.createAccount = async (account) => {
+ const { username, dbPassword } = account;
+ if (!username || !dbPassword) return;
+ try {
+ await client.createUser(username, dbPassword);
+ await client.createDatabase(username);
+ await client.grantPrivilege(username, "WRITE", username);
+ logger.info(
+ `Sucessfully created new user and database with ${username} name`
+ );
+ } catch (err) {
+ logger.error(err);
+ throw new Error(`failed to create new influx user with ${username} name`);
+ }
+};
+influxModule.deleteAccount = async (username) => {
+ if (!username) return;
+ try {
+ await client.dropUser(username);
+ await client.dropDatabase(username);
+ logger.info(`Sucessfully deleted account with ${username} name`);
+ } catch (err) {
+ logger.error(err);
+ throw new Error(`failed to delete influx ${username} account`);
+ }
+};
+influxModule.checkAccount = async (username) => {
+ if (!username) return;
+ try {
+ const users = await client.getUsers();
+ const user = users.filter((u) => u === username)[0];
+ logger.info(
+ user
+ ? `Found ${user} account with ${username}`
+ : `No account with ${username} were found`
+ );
+ return Boolean(user);
+ } catch (err) {
+ logger.error(err);
+ throw new Error(`failed to check for influx ${username} account`);
+ }
+};
+
+module.exports = influxModule;
diff --git a/database/influx/influx.test.js b/database/influx/influx.test.js
new file mode 100644
index 0000000..a3ef479
--- /dev/null
+++ b/database/influx/influx.test.js
@@ -0,0 +1,107 @@
+jest.mock("../../lib/log");
+const logGen = require("../../lib/log");
+const logger = { error: jest.fn(), info: jest.fn() };
+logGen.mockReturnValue(logger);
+jest.mock("dotenv");
+require("dotenv").config();
+jest.mock("influx");
+const { InfluxDB } = require("influx");
+const influx = require("influx");
+const {
+ startInflux,
+ createAccount,
+ deleteAccount,
+ checkAccount,
+} = require("./influx");
+describe("test InfluxDB", () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+ const mockClient = {
+ InfluxDB: jest.fn(),
+ createUser: jest.fn(),
+ createDatabase: jest.fn(),
+ grantPrivilege: jest.fn(),
+ dropUser: jest.fn(),
+ dropDatabase: jest.fn(),
+ getUsers: jest.fn(),
+ };
+ const user = { username: "username", dbPassword: "password" };
+ InfluxDB.mockImplementation(function () {
+ return mockClient;
+ });
+ it("should start influx client", () => {
+ startInflux();
+ expect(influx.InfluxDB).toHaveBeenCalledTimes(1);
+ });
+ it("should use global variables", () => {
+ startInflux();
+ expect(influx.InfluxDB.mock.calls[0][0].host).toEqual("localhost");
+ expect(influx.InfluxDB.mock.calls[0][0].port).toEqual(8086);
+ });
+ describe("createAccount", () => {
+ it("should sucessfully create new account", async () => {
+ await createAccount(user);
+ expect(mockClient.createUser).toHaveBeenCalledTimes(1);
+ expect(mockClient.createDatabase).toHaveBeenCalledTimes(1);
+ expect(mockClient.grantPrivilege).toHaveBeenCalledTimes(1);
+ });
+ it("should call logger in case of an error", async () => {
+ try {
+ mockClient.createUser.mockReturnValue(Promise.reject());
+ expect(await createAccount(user)).rejects.toThrow();
+ } catch (err) {
+ expect(logger.error).toHaveBeenCalledTimes(1);
+ }
+ });
+ it("should return if no account argument was provided", async () => {
+ const res = await createAccount({});
+ expect(res).toEqual(undefined);
+ });
+ });
+ describe("deleteAccount", () => {
+ it("should sucessfully delete account", async () => {
+ await deleteAccount(user.username);
+ expect(mockClient.dropUser).toHaveBeenCalledTimes(1);
+ expect(mockClient.dropDatabase).toHaveBeenCalledTimes(1);
+ });
+ it("should throw error to invalid credentials", async () => {
+ try {
+ mockClient.dropUser.mockReturnValue(Promise.reject());
+ expect(await deleteAccount(user.username)).rejects.toThrow();
+ } catch (err) {
+ expect(logger.error).toHaveBeenCalledTimes(1);
+ }
+ });
+ it("should return if no username was provided", async () => {
+ const res = await deleteAccount("");
+ expect(res).toEqual(undefined);
+ });
+ });
+ describe("checkAccount", () => {
+ it("should return true if account exists", async () => {
+ mockClient.getUsers.mockReturnValue(["name", "username"]);
+ const res = await checkAccount("username");
+ expect(mockClient.getUsers).toHaveBeenCalledTimes(1);
+ expect(res).toEqual(true);
+ });
+ it("should return false if account doesnt exitsts", async () => {
+ mockClient.getUsers.mockReturnValue([]);
+ const res = await checkAccount(user);
+ expect(mockClient.getUsers).toHaveBeenCalledTimes(1);
+ expect(res).toEqual(false);
+ });
+ it("should throw an error", async () => {
+ try {
+ mockClient.getUsers.mockReturnValue(Promise.reject());
+ expect(await checkAccount(user)).rejects.toThrow();
+ } catch (err) {
+ expect(logger.error).toHaveBeenCalledTimes(1);
+ }
+ });
+ it("should return if no username was provided", async () => {
+ const res = await checkAccount();
+ expect(res).toEqual(undefined);
+ });
+ });
+});
diff --git a/env.example b/env.example
index 83bca08..8f24475 100644
--- a/env.example
+++ b/env.example
@@ -16,12 +16,6 @@ PASSWORD=***
ALTER_DB=true
#password for session-express
SECRET=***
-#Neo4j graph database url
-NEO4J_URL=neo4j://104.168.169.204
-#Neo4j username
-NEO4J_USER=***
-#Neo4j password
-NEO4J_PASSWORD=***
#production/development flag, in jest tests this varible is "test" by default
NODE_ENV = development
#local port, default value 3052
@@ -29,3 +23,13 @@ PORT = 3052
#hostname adress, default is https://learndatabases.dev,
#if you want to use localhost you need to specify port, for example http://localhost:4000
HOSTNAME = https://learndatabases.dev
+#ArangoDB hostname
+ARANGO_URL = http://localhost:8529
+#ArangoDB username
+ARANGO_USER = root
+#ArangoDB password
+ARANGO_PW = ***
+#InfluxDB hostname
+INF_HOST = localhost
+#InfluxDB port
+INF_PORT = 8086
diff --git a/lib/util.js b/lib/util.js
index 42f678f..b706c61 100644
--- a/lib/util.js
+++ b/lib/util.js
@@ -3,6 +3,7 @@ const db = require("../sequelize/db");
const pg = require("../database/postgres/pg");
const arango = require("../database/arango/arango");
const es = require("../database/elasticsearch/elastic");
+const influx = require("../database/influx/influx");
const logger = require("./log")(__filename);
const util = {};
@@ -37,6 +38,9 @@ util.cleanAnonymous = async () => {
const arangoDbExists = await arango.checkIfDatabaseExists(username);
if (arangoDbExists) await arango.deleteAccount(username);
+ const influxDbExists = await influx.checkAccount(username);
+ if (influxDbExists) await influx.deleteAccount(username);
+
return await user.destroy();
})
).then(() => {
diff --git a/lib/util.test.js b/lib/util.test.js
index 1246099..52851af 100644
--- a/lib/util.test.js
+++ b/lib/util.test.js
@@ -3,6 +3,7 @@ jest.mock("../sequelize/db");
jest.mock("../database/postgres/pg");
jest.mock("../database/arango/arango");
jest.mock("../database/elasticsearch/elastic");
+jest.mock("../database/influx/influx");
jest.mock("./log");
const sequelize = require("sequelize");
@@ -10,6 +11,7 @@ const db = require("../sequelize/db");
const pg = require("../database/postgres/pg");
const es = require("../database/elasticsearch/elastic");
const arango = require("../database/arango/arango");
+const influx = require("../database/influx/influx");
sequelize.Op = { and: "and", lt: "lt" };
const Accounts = {
@@ -21,6 +23,7 @@ db.getModels = () => {
pg.deletePgAccount = jest.fn();
es.deleteAccount = jest.fn();
arango.deleteAccount = jest.fn();
+influx.deleteAccount = jest.fn();
const logGen = require("./log");
const logger = {
info: jest.fn(),
@@ -54,20 +57,24 @@ describe("Testing cleanAnonymous function", () => {
pg.userHasPgAccount = () => false;
es.checkAccount = () => false;
arango.checkIfDatabaseExists = () => false;
+ influx.checkAccount = () => false;
await util.cleanAnonymous();
expect(pg.deletePgAccount).not.toHaveBeenCalled();
expect(es.deleteAccount).not.toHaveBeenCalled();
expect(arango.deleteAccount).not.toHaveBeenCalled();
+ expect(influx.deleteAccount).not.toHaveBeenCalled();
});
it("should call database functions if expired accounts are found", async () => {
Accounts.findAll.mockReturnValue([{ destroy: () => {} }]);
pg.userHasPgAccount = () => true;
es.checkAccount = () => true;
arango.checkIfDatabaseExists = () => true;
+ influx.checkAccount = () => true;
await util.cleanAnonymous();
expect(pg.deletePgAccount).toHaveBeenCalled();
expect(es.deleteAccount).toHaveBeenCalled();
expect(arango.deleteAccount).toHaveBeenCalled();
+ expect(influx.deleteAccount).toHaveBeenCalled();
});
it("should call logger.error if cleaning fails", async () => {
Accounts.findAll.mockImplementation(() => {
diff --git a/package-lock.json b/package-lock.json
index de6ce7c..0c79941 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -4507,6 +4507,11 @@
"wrappy": "1"
}
},
+ "influx": {
+ "version": "5.6.3",
+ "resolved": "https://registry.npmjs.org/influx/-/influx-5.6.3.tgz",
+ "integrity": "sha512-j2biV776HXb2IbIcp2G24w50IqIWENDnKitm0Vj54vlpw9EfGzY7x7ndCRZSGzzm4fyDLSDQ+/cypZQpuDQxyA=="
+ },
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
diff --git a/package.json b/package.json
index 7ecf1be..dcd2edf 100644
--- a/package.json
+++ b/package.json
@@ -33,6 +33,7 @@
"ejs": "^3.1.3",
"express": "^4.17.1",
"express-session": "^1.17.1",
+ "influx": "^5.6.3",
"mailgun-js": "^0.22.0",
"mongodb": "^3.6.1",
"neo4j-driver": "^4.1.1",
diff --git a/public/TICK.png b/public/TICK.png
new file mode 100644
index 0000000..5b83b8d
Binary files /dev/null and b/public/TICK.png differ
diff --git a/public/lessons/Influx.md b/public/lessons/Influx.md
new file mode 100644
index 0000000..b9d70d6
--- /dev/null
+++ b/public/lessons/Influx.md
@@ -0,0 +1,121 @@
+# Timeseries overview
+
+Timeseries databases are optimized for storing and serving pairs of times and values. This could be various metrics, events, sensor data, stock currency exchange price data and other types of analytics.
+
+**Key-concepts**
+
+* More writes than reads: 95%-99% of the operations on time series data are writes.
+
+* Smooth, continuous, highly concurrent, and high throughput data writing. Time series data is usually generated at a fixed time frequency.
+
+* Data is written and is not updated. In the time dimension, as time passes by, all data written is new. No existing data is updated, except for manual revision.
+
+# InfluxDB
+
+InfluxDB is an open source time series platform. It uses the following format: `measurement-name tag-set field-set timestamp`. For example:
+```
+cpu, host=serverA, region=uswest idle=23, user=42, system=12 1464623548s
+```
+Measurement-name `cpu` is a string, the tag set `host=serverA, region=uswest` is a collection of key/value pairs where all values are strings and the field-set `idle=23, user=42, system=12` is a key/values pairs where values can be int64, float64, bool or string. Timestamps `1464623548s` in InfluxDB can be second, millisecond, microsecond, or nanosecond precision.
+
+Conceptually you can think of a measurement as an SQL table, where the primary index is always time. Tags and fields are effectively columns in the table. Tags are indexed, and fields are not. The difference is that, with InfluxDB, you can have millions of measurements, you don’t have to define schemas up-front, and null values aren’t stored.
+
+Note that if you attempt to write data with a different type than previously used, InfluxDB will reject those data.
+So trying to add something like this (the idle field was changed to a string) to the cpu measurement will result in an error.
+```
+cpu, host=serverA, region=uswest idle=someString, user=42, system=12 1464623210s
+```
+InfluxDB stores data on the disk in a columnar format, which makes calculating aggregates on a single field a very fast operation. On the other hand, map-reduce queries are slower compared to relational databases, but such queries are rare in timeseries databases, so it's a worthwhile tradeoff. The measurement name and tag sets are kept in an inverted index for fast lookups. You can read more about design decisions and tradeoffs [here](https://docs.influxdata.com/influxdb/v1.8/concepts/insights_tradeoffs/).
+
+# Node-influx
+
+InfluxDB has several open endpoints like `/query` and `/write`; by sending HTTP requests to these endpoints you can read/write data. While working with a Node server it's more convenient to use the [influx](https://www.npmjs.com/package/influx) npm package. First you need to configure the influx client:
+```
+const client = new Influx.InfluxDB({
+ host: 'hostname',
+ database: 'database_name',
+ username:'your_username'
+})
+```
+You can ping your server to check if everything is OK:
+```
+client.ping(5000).then((hosts) => {
+ hosts.forEach((host) => {
+ console.log(
+ `${host.url.host} responded in ${host.rtt}ms running ${host.version})`
+ );
+ });
+});
+```
+It should return something like `hostname:port responded in 27ms running 1.8.3`.
+
+Now you're ready to write data to your database.
+
+```
+client.writePoints([
+ {
+ measurement: 'cpu',
+ tags: { host: 'serverA',region:'USWest' },
+ fields: { idle:23, user:12, server:11 },
+ }
+],{
+ database:"database_name"
+})
+```
+`writePoints` takes array of points as first argument and optional config object and returns a promise [(reference)](https://node-influx.github.io/class/src/index.js~InfluxDB.html#instance-method-writePoints).
+
+And read data:
+```
+client.query("select * from cpu", { database: "username" }).then((results) => {
+ console.log(results);
+});
+```
+`query` takes query string and optional config object and returns a promise [(reference)](https://node-influx.github.io/class/src/index.js~InfluxDB.html#instance-method-query). InfluxDB uses its own SQL-like language, you can learn more about writing queries with InfluxQL [here](https://docs.influxdata.com/influxdb/v1.8/query_language/explore-data/).
+
+
+# Continuous queries and retention policies
+
+InfluxDB can handle millions of data points per second. Working with that much data over a long period can lead to storage concerns. InfluxDB automatically compacts data to minimize your storage space. In addition, you can easily downsample the data; keeping high-precision raw data for a limited time and storing the lower-precision, summarized data for much longer or until the end of time. InfluxDB has two features that help to automate the downsampling and data expiration processes — Continuous Queries and Retention Policies.
+
+**Retention policy** (RP) is the part of InfluxDB data structure that describes for how long InfluxDB keeps data. InfluxDB compares your local server’s timestamp to the timestamps on your data and deletes data older than the RP’s DURATION. A single database can have several RPs and RPs are unique per database. For example, the following command will create new retention policy `two_hours` for your database, which keeps data for duration of 2h and is default for this database. The replication factor is a required parameter but must always be set to 1 for single node instances.
+```
+client.createRetentionPolicy("two_hours",{
+ duration:"2h",
+ replication:1,
+ database:"database_name",
+ isDefault:true
+})
+```
+Next we create another retention policy which keeps the data for 52 weeks.
+```
+client.createRetentionPolicy("a_year",{
+ duration:"52w",
+ replication:1,
+ database:"database_name",
+ isDefault:false
+})
+```
+Note that any operations that don't explicitly specify RP will still go to the default `two_hours` retention policy.
+
+**Continuous query** (CQ) is a query that runs automatically and periodically within a database. CQs require a function in the SELECT clause and must include a GROUP BY time() clause.
+```
+client.createContinuousQuery("cpu_30",`SELECT mean("cpu") AS "mean_cpu"
+INTO "a_year"."downsampled_measurements"
+FROM "cpu"
+GROUP BY time(30m)`,"database_name")
+```
+This query creates a CQ called `cpu_30`. It tells InfluxDB to calculate the 30-minute average in the DEFAULT RP `two_hours`. It also tells InfluxDB to write it to the measurement `downsampled_measurements` in the retention policy `a_year` with the field keys `mean_cpu`. InfluxDB will run this query every 30 minutes for the previous 30 minutes.
+
+With such setup we can have detailed data of the last two hours while keeping 30m averages for a year.
+
+# The TICK stack
+
+InfluxDB is a part of the so-called TICK stack, which consists of Telegraf, InfluxDB, Chronograf and Kapacitor. The TICK Stack is a set of open source projects designed to handle massive amounts of time-stamped information to support data analysis.
+
+
+
+[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/): A metrics collection agent. Use it to collect and send metrics to InfluxDB. Telegraf’s plugin architecture supports collection of metrics from 100+ popular services right out of the box.
+
+[Chronograf](https://www.influxdata.com/time-series-platform/chronograf/): A UI layer for the whole TICK stack. Use it to set up graphs and dashboards of data in InfluxDB and hook up Kapacitor alerts.
+
+[Kapacitor](https://www.influxdata.com/time-series-platform/kapacitor/): A metrics and events processing and alerting engine. Use it to crunch time series data into actionable alerts and easily send those alerts to many popular products like PagerDuty and Slack.
diff --git a/src/routes/renderRoutes.js b/src/routes/renderRoutes.js
index 56c72d2..2cb5446 100644
--- a/src/routes/renderRoutes.js
+++ b/src/routes/renderRoutes.js
@@ -2,6 +2,7 @@ const db = require("../../sequelize/db");
const es = require("../../database/elasticsearch/elastic");
const pg = require("../../database/postgres/pg");
const arangoModule = require("../../database/arango/arango");
+const influxModule = require("../../database/influx/influx");
require("dotenv").config();
const routes = {};
@@ -10,6 +11,7 @@ const dev_dbHost = {
Postgres: process.env.HOST,
Elasticsearch: process.env.ES_HOST,
Arango: process.env.ARANGO_URL,
+ Influx: process.env.INF_HOST,
};
// This is the 'host' url for a person's database credentials in prod.
@@ -17,12 +19,14 @@ const dbHost = {
Postgres: "learndatabases.dev",
Elasticsearch: "elastic.learndatabases.dev",
Arango: "arangodb.learndatabases.dev",
+ Influx: "PRODUCTION INFLUXDB HOST",
};
const checkAccount = {
Postgres: pg.userHasPgAccount,
Elasticsearch: es.checkAccount,
Arango: arangoModule.checkIfDatabaseExists,
+ Influx: influxModule.checkAccount,
};
const prod = () => {
diff --git a/src/routes/userRoutes.js b/src/routes/userRoutes.js
index a00191e..3faaf39 100644
--- a/src/routes/userRoutes.js
+++ b/src/routes/userRoutes.js
@@ -10,6 +10,7 @@ const db = require("../../sequelize/db");
const pgModule = require("../../database/postgres/pg");
const es = require("../../database/elasticsearch/elastic");
const arangoModule = require("../../database/arango/arango");
+const influx = require("../../database/influx/influx.js");
const routes = {};
routes.resetPasswordEmail = async (req, res) => {
@@ -143,6 +144,7 @@ const createDatabaseAccount = {
Postgres: pgModule.userHasPgAccount,
Elasticsearch: es.createAccount,
Arango: arangoModule.createAccount,
+ Influx: influx.createAccount,
};
routes.createDatabase = async (req, res) => {
diff --git a/src/server.js b/src/server.js
index d53d710..3e3c1df 100644
--- a/src/server.js
+++ b/src/server.js
@@ -19,6 +19,7 @@ let server = null;
let app = null;
const arangoModule = require("../database/arango/arango");
+const influxModule = require("../database/influx/influx");
let cleaner = null;
@@ -30,6 +31,7 @@ const startServer = async (portNumber) => {
await dbModule.start();
await pgModule.startPGDB();
await arangoModule.startArangoDB();
+ influxModule.startInflux();
cleaner = await util.cleanAnonymous();
diff --git a/tests/integration/__snapshots__/welcome.test.js.snap b/tests/integration/__snapshots__/welcome.test.js.snap
index a260427..dac62e2 100644
--- a/tests/integration/__snapshots__/welcome.test.js.snap
+++ b/tests/integration/__snapshots__/welcome.test.js.snap
@@ -91,6 +91,7 @@ exports[`test welcome page should render arango page correctly 1`] = `
Postgres: \`database: \${username}\`,
Elasticsearch: \`index: \${username}-*\`,
Arango: \`databaseName: \${username}\`,
+ Influx:\`database: \${username}\`
};
credentials.innerHTML = \`\${creds}\${endingChoice[database]}
\`;
introduction.append(credentials);
@@ -281,6 +282,7 @@ exports[`test welcome page should render elasticsearch page correctly 1`] = `
Postgres: \`database: \${username}\`,
Elasticsearch: \`index: \${username}-*\`,
Arango: \`databaseName: \${username}\`,
+ Influx:\`database: \${username}\`
};
credentials.innerHTML = \`\${creds}\${endingChoice[database]}
\`;
introduction.append(credentials);
@@ -471,6 +473,7 @@ exports[`test welcome page should render postgres page correctly 1`] = `
Postgres: \`database: \${username}\`,
Elasticsearch: \`index: \${username}-*\`,
Arango: \`databaseName: \${username}\`,
+ Influx:\`database: \${username}\`
};
credentials.innerHTML = \`\${creds}\${endingChoice[database]}
\`;
introduction.append(credentials);
@@ -1059,7 +1062,11 @@ exports[`test welcome page should render welcome page correctly 1`] = `
+ +MongoDB (coming next sprint!)
An open source database management system using a document-oriented database model that supports various forms of data.
-
+Everything we do here is open source. Checkout our github Here @@ -1115,6 +1122,9 @@ exports[`test welcome page should render welcome page correctly 1`] = ` // }) document.getElementById('arango').addEventListener('click', () => { location.href = '/tutorial/Arango' + }) + document.getElementById('influx').addEventListener('click', () => { + location.href = '/tutorial/Influx' }) diff --git a/views/tutorial.ejs b/views/tutorial.ejs index 8fba84d..c8d7491 100644 --- a/views/tutorial.ejs +++ b/views/tutorial.ejs @@ -33,6 +33,7 @@ Postgres: `database: ${username}`, Elasticsearch: `index: ${username}-*`, Arango: `databaseName: ${username}`, + Influx:`database: ${username}` }; credentials.innerHTML = `InfluxDB
+Timeseries database.
+
${creds}${endingChoice[database]}
`;
introduction.append(credentials);
diff --git a/views/welcome.ejs b/views/welcome.ejs
index 9095bbc..920f367 100644
--- a/views/welcome.ejs
+++ b/views/welcome.ejs
@@ -21,7 +21,11 @@
+ +MongoDB (coming next sprint!)
An open source database management system using a document-oriented database model that supports various forms of data.
-
+Everything we do here is open source. Checkout our github Here @@ -77,6 +81,9 @@ // }) document.getElementById('arango').addEventListener('click', () => { location.href = '/tutorial/Arango' + }) + document.getElementById('influx').addEventListener('click', () => { + location.href = '/tutorial/Influx' })InfluxDB
+Timeseries database.
+