diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..d9dbf33c --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Initial format commit +245bc11c93fcff5cbaceddb799de5e1fad132d3e \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0d6993ae..995b9d4e 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ bin-release/ .idea/documentation.iml .idea/modules.xml .idea/vcs.xml + +node_modules \ No newline at end of file diff --git a/docs/administration/administration.md b/docs/administration/administration.md index fee28102..b009ddba 100644 --- a/docs/administration/administration.md +++ b/docs/administration/administration.md @@ -6,9 +6,9 @@ Harper is designed for minimal administrative effort, and with managed services As a distributed database, data protection and recovery can benefit from different data protection strategies than a traditional single-server database. But multiple aspects of data protection and recovery should be considered: -* Availability: As a distributed database Harper is intrinsically built for high-availability and a cluster will continue to run even with complete server(s) failure. This is the first and primary defense for protecting against any downtime or data loss. Harper provides fast horizontal scaling functionality with node cloning, which facilitates ease of establishing high availability clusters. -* [Audit log](logging/audit-logging.md): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. -* Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables.md#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss). +- Availability: As a distributed database, Harper is intrinsically built for high availability, and a cluster will continue to run even through the complete failure of one or more servers. This is the first and primary defense against any downtime or data loss. Harper provides fast horizontal scaling functionality with node cloning, which makes it easy to establish high-availability clusters. +- [Audit log](logging/audit-logging.md): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches.
+- Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables.md#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss). ### Horizontal Scaling with Node Cloning @@ -17,10 +17,11 @@ Harper provides rapid horizontal scaling capabilities through [node cloning func ### Monitoring Harper provides robust capabilities for analytics and observability to facilitate effective and informative monitoring: -* Analytics provides statistics on usage, request counts, load, memory usage with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics.md). -* A large variety of real-time statistics about load, system information, database metrics, thread usage can be retrieved through the [`system_information` API](../developers/operations-api/utilities.md). -* Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering.md). -* Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy visualize and monitor Harper with Graphana. + +- Analytics provides statistics on usage, request counts, load, and memory usage, with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics.md). +- A large variety of real-time statistics about load, system information, database metrics, and thread usage can be retrieved through the [`system_information` API](../developers/operations-api/utilities.md). +- Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering.md). +- Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy to visualize and monitor Harper with Grafana. ### Replication Transaction Logging diff --git a/docs/administration/cloning.md b/docs/administration/cloning.md index 040824c6..789f1eea 100644 --- a/docs/administration/cloning.md +++ b/docs/administration/cloning.md @@ -1,9 +1,9 @@ # Clone Node -Clone node is a configurable node script that when pointed to another instance of Harper will create a clone of that -instance's config, databases and setup full replication. If it is run in a location where there is no existing Harper install, +Clone node is a configurable node script that, when pointed to another instance of Harper, will create a clone of that +instance's config and databases and set up full replication. If it is run in a location where there is no existing Harper install, it will, along with cloning, install Harper.
If it is run in a location where there is another Harper instance, it will -only clone config, databases and replication that do not already exist. +only clone config, databases and replication that do not already exist. Clone node is triggered when Harper is installed or started with certain environment or command line (CLI) variables set (see below). @@ -14,34 +14,36 @@ To start clone run `harperdb` in the CLI with either of the following variables #### Environment variables -* `HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). -* `HDB_LEADER_USERNAME` - The leader node admin username. -* `HDB_LEADER_PASSWORD` - The leader node admin password. -* `REPLICATION_HOSTNAME` - _(optional)_ The clones replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. +- `HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +- `HDB_LEADER_USERNAME` - The leader node admin username. +- `HDB_LEADER_PASSWORD` - The leader node admin password. +- `REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. For example: + ``` HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb ``` #### Command line variables -* `--HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). -* `--HDB_LEADER_USERNAME` - The leader node admin username. -* `--HDB_LEADER_PASSWORD` - The leader node admin password. -* `--REPLICATION_HOSTNAME` - _(optional)_ The clones clustering host. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. +- `--HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +- `--HDB_LEADER_USERNAME` - The leader node admin username. +- `--HDB_LEADER_PASSWORD` - The leader node admin password. +- `--REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. For example: + ``` harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --REPLICATION_HOSTNAME node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ... ``` -Each time clone is run it will set a value `cloned: true` in `harperdb-config.yaml`. This value will prevent clone from -running again. If you want to run clone again set this value to `false`. If Harper is started with the clone variables +Each time clone is run, it will set a value `cloned: true` in `harperdb-config.yaml`. This value will prevent clone from +running again. If you want to run clone again, set this value to `false`. If Harper is started with the clone variables still present and `cloned` is true, Harper will just start as normal. -Clone node does not require any additional configuration apart from the variables referenced above. -However, if you wish to set any configuration during clone this can be done by passing the config as environment/CLI +Clone node does not require any additional configuration apart from the variables referenced above.
+However, if you wish to set any configuration during clone, this can be done by passing the config as environment/CLI variables or cloning overtop of an existing `harperdb-config.yaml` file. More can be found in the Harper config documentation [here](../deployments/configuration.md). @@ -51,10 +53,10 @@ More can be found in the Harper config documentation [here](../deployments/confi To set any specific (optional) clone config, including the exclusion of any database and/or replication, there is a file called `clone-node-config.yaml` that can be used. -The file must be located in the `ROOTPATH` directory of your clone (the `hdb` directory where you clone will be installed. If the directory does not exist, create one and add the file to it). -The config available in `clone-node-config.yaml` is: +The file must be located in the `ROOTPATH` directory of your clone (the `hdb` directory where your clone will be installed. If the directory does not exist, create one and add the file to it). +The config available in `clone-node-config.yaml` is: ```yaml databaseConfig: @@ -68,27 +70,27 @@ componentConfig: - name: null ``` -_Note: only include the configuration that you are using. If no clone config file is provided nothing will be excluded, +_Note: only include the configuration that you are using. If no clone config file is provided, nothing will be excluded, unless it already exists on the clone._ `databaseConfig` - Set any databases or tables that you wish to exclude from cloning. -`componentConfig` - Set any components that you do not want cloned. Clone node will not clone the component code, +`componentConfig` - Set any components that you do not want cloned. Clone node will not clone the component code; it will only clone the component reference that exists in the leader harperdb-config file. ### Cloning configuration -Clone node will not clone any configuration that is classed as unique to the leader node. This includes `replication.hostname`, `replication.url`,`clustering.nodeName`, -`rootPath` and any other path related values, for example `storage.path`, `logging.root`, `componentsRoot`, +Clone node will not clone any configuration that is classed as unique to the leader node. This includes `replication.hostname`, `replication.url`, `clustering.nodeName`, +`rootPath` and any other path-related values, for example `storage.path`, `logging.root`, `componentsRoot`, any authentication certificate/key paths. ### Cloning system database -Harper uses a database called `system` to store operational information. Clone node will only clone the user and role +Harper uses a database called `system` to store operational information. Clone node will only clone the user and role tables from this database. It will also set up replication on these tables, which means that any existing and future users and roles that are added will be replicated throughout the cluster. -Cloning the user and role tables means that once clone node is complete, the clone will share the same login credentials with +Cloning the user and role tables means that once clone node is complete, the clone will share the same login credentials with the leader. ### Replication @@ -103,20 +105,21 @@ If cloning with replication, the leader's JWT private and public keys will be cl ### Cloning overtop of an existing Harper instance -Clone node will not overwrite any existing config, database or replication. It will write/clone any config database or replication -that does not exist on the node it is running on. +Clone node will not overwrite any existing config, database or replication.
It will write/clone any config, database or replication +that does not exist on the node it is running on. -An example of how this can be useful is if you want to set Harper config before the clone is created. To do this you -would create a harperdb-config.yaml file in your local `hdb` root directory with the config you wish to set. Then +One example of how this can be useful is setting Harper config before the clone is created. To do this you +would create a harperdb-config.yaml file in your local `hdb` root directory with the config you wish to set. Then when clone is run it will append the missing config to the file and install Harper with the desired config. -Another useful example could be retroactively adding another database to an existing instance. Running clone on -an existing instance could create a full clone of another database and set up replication between the database on the +Another useful example could be retroactively adding another database to an existing instance. Running clone on +an existing instance could create a full clone of another database and set up replication between the database on the leader and the clone. ### Cloning steps Clone node will execute the following steps when run: + 1. Look for an existing Harper install. It does this by using the default (or user provided) `ROOTPATH`. 2. If an existing instance is found it will check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true` clone will skip the clone logic and start Harper. 3. Clone harperdb-config.yaml values that don't already exist (excluding values unique to the leader node). diff --git a/docs/administration/harper-studio/README.md b/docs/administration/harper-studio/README.md index 46d5323c..de82e7e3 100644 --- a/docs/administration/harper-studio/README.md +++ b/docs/administration/harper-studio/README.md @@ -1,4 +1,5 @@ # Harper Studio + Harper Studio is the web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface without any knowledge of the underlying Harper API. It’s free to sign up, get started today! [Sign up for free!](https://studio.harperdb.io/sign-up) @@ -6,8 +7,11 @@ Harper Studio is the web-based GUI for Harper. Studio enables you to administer, Harper now includes a simplified local Studio that is packaged with all Harper installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration.md#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io). --- + ## How does Studio Work? + While Harper Studio is web-based and hosted by us, all database interactions are performed on the Harper instance the studio is connected to. The Harper Studio loads in your browser, at which point you log in to your Harper instances. Credentials are stored in your browser cache and are not transmitted back to Harper. All database interactions are made via the Harper Operations API directly from your browser to your instance. ## What type of instances can I manage? -Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI. All Harper instances feature identical behavior whether they are hosted by us or by you. \ No newline at end of file + +Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI.
All Harper instances feature identical behavior whether they are hosted by us or by you. diff --git a/docs/administration/harper-studio/create-account.md b/docs/administration/harper-studio/create-account.md index d9093cf0..8230b777 100644 --- a/docs/administration/harper-studio/create-account.md +++ b/docs/administration/harper-studio/create-account.md @@ -1,22 +1,23 @@ # Create a Studio Account + Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up). -1) Provide the following information: - * First Name - * Last Name - * Email Address - * Subdomain - - *Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* - * Coupon Code (optional) -2) Review the Privacy Policy and Terms of Service. -3) Click the sign up for free button. -4) You will be taken to a new screen to add an account password. Enter your password. - *Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* -5) Click the add account password button. +1. Provide the following information: + - First Name + - Last Name + - Email Address + - Subdomain + + _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._ + - Coupon Code (optional) +2. Review the Privacy Policy and Terms of Service. +3. Click the **sign up for free** button. +4. You will be taken to a new screen to add an account password. Enter your password. + _Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character._ +5. Click the **add account password** button. + +You will receive a Studio welcome email confirming your registration. -Note: Your email address will be used as your username and cannot be changed. \ No newline at end of file +Note: Your email address will be used as your username and cannot be changed. diff --git a/docs/administration/harper-studio/enable-mixed-content.md b/docs/administration/harper-studio/enable-mixed-content.md index 8e74b84c..855df002 100644 --- a/docs/administration/harper-studio/enable-mixed-content.md +++ b/docs/administration/harper-studio/enable-mixed-content.md @@ -2,6 +2,4 @@ Enabling mixed content is required in cases where you would like to connect the Harper Studio to Harper Instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio. - - -A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html). \ No newline at end of file +A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html).
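+
+For context, the Studio (served over HTTPS) administers your instance by sending Operations API requests directly from the browser; when an instance is served over plain HTTP, the browser blocks those requests unless mixed content is enabled. The request below is a minimal sketch of that traffic, assuming a local instance on the default operations port `9925` and placeholder credentials (`describe_all` simply lists the databases visible to the user):
+
+```bash
+curl http://localhost:9925 \
+  -X POST \
+  -H 'Content-Type: application/json' \
+  -u HDB_ADMIN:password \
+  -d '{"operation": "describe_all"}'
+```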
diff --git a/docs/administration/harper-studio/instance-configuration.md b/docs/administration/harper-studio/instance-configuration.md index 19cb2a03..84963e35 100644 --- a/docs/administration/harper-studio/instance-configuration.md +++ b/docs/administration/harper-studio/instance-configuration.md @@ -2,71 +2,63 @@ Harper instance configuration can be viewed and managed directly through the Harper Studio. Harper Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. Enterprise instances can have their licenses updated by modifying licensed RAM. - - All instance configuration is handled through the **config** page of the Harper Studio, accessed with the following instructions: -1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2) Click the appropriate organization that the instance belongs to. +2. Click the appropriate organization that the instance belongs to. -3) Select your desired instance. +3. Select your desired instance. -4) Click config in the instance control bar. +4. Click config in the instance control bar. -*Note, the **config** page will only be available to super users and certain items are restricted to Studio organization owners.* +_Note, the **config** page will only be available to super users and certain items are restricted to Studio organization owners._ ## Instance Overview The **instance overview** panel displays the following instance specifications: -* Instance URL +- Instance URL -* Applications URL +- Applications URL -* Instance Node Name (for clustering) +- Instance Node Name (for clustering) -* Instance API Auth Header (this user) - - *The Basic authentication header used for the logged in Harper database user* +- Instance API Auth Header (this user) -* Created Date (Harper Cloud only) + _The Basic authentication header used for the logged in Harper database user_ -* Region (Harper Cloud only) - - *The geographic region where the instance is hosted.* +- Created Date (Harper Cloud only) -* Total Price +- Region (Harper Cloud only) -* RAM + _The geographic region where the instance is hosted._ -* Storage (Harper Cloud only) +- Total Price -* Disk IOPS (Harper Cloud only) +- RAM ## Update Instance RAM +- Storage (Harper Cloud only) -Harper Cloud instance size and Enterprise instance licenses can be modified with the following instructions. This option is only available to Studio organization owners. +- Disk IOPS (Harper Cloud only) ## Update Instance RAM Harper Cloud instance size and Enterprise instance licenses can be modified with the following instructions. This option is only available to Studio organization owners. Note: For Harper Cloud instances, upgrading RAM may add additional CPUs to your instance as well. Click here to see how many CPUs are provisioned for each instance size. -1) In the **update ram** panel at the bottom left: +1. In the **update ram** panel at the bottom left: + - Select the new instance size. + - If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + - If you do have a credit card associated, you will be presented with the updated billing information. + - Click **Upgrade**.
- * Select the new instance size. - - * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. - - * If you do have a credit card associated, you will be presented with the updated billing information. - - * Click **Upgrade**. - -2) The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. +2. The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. -3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. +3. Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. -*Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* +_Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._ ## Update Instance Storage @@ -74,48 +66,39 @@ The Harper Cloud instance storage size can be increased with the following instr Note: Instance storage can only be upgraded once every 6 hours. -1) In the **update storage** panel at the bottom left: - - * Select the new instance storage size. - - * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. - - * If you do have a credit card associated, you will be presented with the updated billing information. - - * Click **Upgrade**. - -2) The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. - -3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. - -*Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* +1. In the **update storage** panel at the bottom left: + - Select the new instance storage size. + - If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + - If you do have a credit card associated, you will be presented with the updated billing information. + - Click **Upgrade**. + +2. The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. +3. 
Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +_Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._ ## Remove Instance The Harper instance can be deleted/removed from the Studio with the following instructions. Once this operation is started it cannot be undone. This option is only available to Studio organization owners. -1) In the **remove instance** panel at the bottom left: - * Enter the instance name in the text box. - - * The Studio will present you with a warning. - - * Click **Remove**. - -2) The instance will begin deleting immediately. - +1. In the **remove instance** panel at the bottom left: + - Enter the instance name in the text box. + - The Studio will present you with a warning. + - Click **Remove**. + +2. The instance will begin deleting immediately. + ## Restart Instance The Harper Cloud instance can be restarted with the following instructions. -1) In the **restart instance** panel at the bottom right: - * Enter the instance name in the text box. - - * The Studio will present you with a warning. - - * Click **Restart**. - -2) The instance will begin restarting immediately. +1. In the **restart instance** panel at the bottom right: + - Enter the instance name in the text box. + - The Studio will present you with a warning. + - Click **Restart**. + +2. The instance will begin restarting immediately. ## Instance Config (Read Only) -A JSON preview of the instance config is available for reference at the bottom of the page. This is a read only visual and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration.md#using-the-configuration-file-and-naming-conventions). \ No newline at end of file +A JSON preview of the instance config is available for reference at the bottom of the page. This is a read-only view and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration.md#using-the-configuration-file-and-naming-conventions). diff --git a/docs/administration/harper-studio/instances.md b/docs/administration/harper-studio/instances.md index 8237f600..6550e718 100644 --- a/docs/administration/harper-studio/instances.md +++ b/docs/administration/harper-studio/instances.md @@ -2,9 +2,9 @@ The Harper Studio allows you to administer all of your Harper instances in one place. Harper currently offers the following instance types: -* **Harper Cloud Instance** Managed installations of Harper, what we call [Harper Cloud](../../deployments/harper-cloud/).
+- **5G Wavelength Instance** Managed installations of Harper running on the Verizon network through AWS Wavelength, what we call [5G Wavelength Instances](../../deployments/harper-cloud/verizon-5g-wavelength-instances.md). _Note, these instances are only accessible via the Verizon network._ +- **Enterprise Instance** Any Harper installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally. All interactions between the Studio and your instances take place directly from your browser. Harper stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the Harper instances using the standard [Harper API](../../developers/operations-api/). @@ -20,26 +20,32 @@ A summary view of all instances within an organization can be viewed by clicking 4. Select your desired Instance Type. 5. For a Harper Cloud Instance or a Harper 5G Wavelength Instance, click **Create Harper Cloud Instance**. 1. Fill out Instance Info. - 1. Enter Instance Name + 1. Enter Instance Name - _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ - 2. Enter Instance Username + _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ - _This is the username of the initial Harper instance super user._ - 3. Enter Instance Password + 2. Enter Instance Username + + _This is the username of the initial Harper instance super user._ + + 3. Enter Instance Password + + _This is the password of the initial Harper instance super user._ - _This is the password of the initial Harper instance super user._ 2. Click **Instance Details** to move to the next page. 3. Select Instance Specs - 1. Select Instance RAM + 1. Select Instance RAM + + _Harper Cloud Instances are billed based on Instance RAM, this will select the size of your provisioned instance._ [_More on instance specs_](../../deployments/harper-cloud/instance-size-hardware-specs.md)_._ - _Harper Cloud Instances are billed based on Instance RAM, this will select the size of your provisioned instance._ [_More on instance specs_](../../deployments/harper-cloud/instance-size-hardware-specs.md)_._ - 2. Select Storage Size + 2. Select Storage Size - _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS._ [_More on IOPS Impact on Performance_](../../deployments/harper-cloud/iops-impact.md)_._ - 3. Select Instance Region + _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS._ [_More on IOPS Impact on Performance_](../../deployments/harper-cloud/iops-impact.md)_._ + + 3. Select Instance Region + + _The geographic area where your instance will be provisioned._ - _The geographic area where your instance will be provisioned._ 4. Click **Confirm Instance Details** to move to the next page. 5. Review your Instance Details, if there is an error, use the back button to correct it. 6. 
Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/), if you agree, click the **I agree** radio button to confirm. @@ -53,29 +59,36 @@ A summary view of all instances within an organization can be viewed by clicking 3. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. 4. Select **Register Enterprise Instance**. 1. Fill out Instance Info. - 1. Enter Instance Name + 1. Enter Instance Name + + _This is used for descriptive purposes only._ + + 2. Enter Instance Username - _This is used for descriptive purposes only._ - 2. Enter Instance Username + _The username of a Harper super user that is already configured in your Harper installation._ - _The username of a Harper super user that is already configured in your Harper installation._ - 3. Enter Instance Password + 3. Enter Instance Password - _The password of a Harper super user that is already configured in your Harper installation._ - 4. Enter Host + _The password of a Harper super user that is already configured in your Harper installation._ - _The host to access the Harper instance. For example, `harperdb.myhost.com` or `localhost`._ - 5. Enter Port + 4. Enter Host - _The port to access the Harper instance. Harper defaults `9925` for HTTP and `31283` for HTTPS._ - 6. Select SSL + _The host to access the Harper instance. For example, `harperdb.myhost.com` or `localhost`._ + + 5. Enter Port + + _The port to access the Harper instance. Harper defaults to `9925` for HTTP and `31283` for HTTPS._ + + 6. Select SSL + + _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._ - _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._ 2. Click **Instance Details** to move to the next page. 3. Select Instance Specs - 1. Select Instance RAM + 1. Select Instance RAM + + _Harper instances are billed based on Instance RAM. Selecting additional RAM enables faster and more complex queries._ - _Harper instances are billed based on Instance RAM. Selecting additional RAM will enable the ability for faster and more complex queries._ 4. Click **Confirm Instance Details** to move to the next page. 5. Review your Instance Details, if there is an error, use the back button to correct it. 6. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/), if you agree, click the **I agree** radio button to confirm. @@ -86,17 +99,18 @@ A summary view of all instances within an organization can be viewed by clicking Instance deletion has two different behaviors depending on the instance type. -* **Harper Cloud Instance** This instance will be permanently deleted, including all data. -* **Enterprise Instance** The instance will be removed from the Harper Studio only. This does not uninstall Harper from your system and your data will remain intact. +- **Harper Cloud Instance** This instance will be permanently deleted, including all data.
This process is irreversible and cannot be undone. +- **Enterprise Instance** The instance will be removed from the Harper Studio only. This does not uninstall Harper from your system and your data will remain intact. An instance can be deleted as follows: 1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. 2. Click the appropriate organization that the instance belongs to. 3. Identify the proper instance card and click the trash can icon. -4. Enter the instance name into the text box. +4. Enter the instance name into the text box. + + _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ - _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ 5. Click the **Do It** button. ## Upgrade an Instance @@ -117,10 +131,12 @@ To log in to an instance: 1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. 2. Click the appropriate organization that the instance belongs to. 3. Identify the proper instance card, it will have an unlocked icon and a status reading PLEASE LOG IN, and click the center of the card. -4. Enter the database username. +4. Enter the database username. + + _The username of a Harper user that is already configured in your Harper instance._ + +5. Enter the database password. - _The username of a Harper user that is already configured in your Harper instance._ -5. Enter the database password. + _The password of a Harper user that is already configured in your Harper instance._ - _The password of a Harper user that is already configured in your Harper instance._ 6. Click **Log In**. diff --git a/docs/administration/harper-studio/login-password-reset.md b/docs/administration/harper-studio/login-password-reset.md index 07e82327..01dfa8fa 100644 --- a/docs/administration/harper-studio/login-password-reset.md +++ b/docs/administration/harper-studio/login-password-reset.md @@ -4,35 +4,35 @@ To log into your existing Harper Studio account: -1) Navigate to the [Harper Studio](https://studio.harperdb.io/). -2) Enter your email address. -3) Enter your password. -4) Click **sign in**. +1. Navigate to the [Harper Studio](https://studio.harperdb.io/). +2. Enter your email address. +3. Enter your password. +4. Click **sign in**. ## Reset a Forgotten Password To reset a forgotten password: -1) Navigate to the Harper Studio password reset page. -2) Enter your email address. -3) Click **send password reset email**. -4) If the account exists, you will receive an email with a temporary password. -5) Navigate back to the Harper Studio login page. -6) Enter your email address. -7) Enter your temporary password. -8) Click **sign in**. -9) You will be taken to a new screen to reset your account password. Enter your new password. -*Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* -10) Click the **add account password** button. +1. Navigate to the Harper Studio password reset page. +2. Enter your email address. +3. Click **send password reset email**. +4. If the account exists, you will receive an email with a temporary password. +5. Navigate back to the Harper Studio login page. +6. Enter your email address. +7. Enter your temporary password. +8. Click **sign in**. +9. You will be taken to a new screen to reset your account password. Enter your new password. 
+ _Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character._ +10. Click the **add account password** button. ## Change Your Password If you are already logged into the Studio, you can change your password through the user interface. -1) Navigate to the Harper Studio profile page. -2) In the **password** section, enter: +1. Navigate to the Harper Studio profile page. +2. In the **password** section, enter: + - Current password. + - New password. + - New password again _(for verification)_. - * Current password. - * New password. - * New password again *(for verification)*. -4) Click the **Update Password** button. \ No newline at end of file +3. Click the **Update Password** button. diff --git a/docs/administration/harper-studio/manage-applications.md b/docs/administration/harper-studio/manage-applications.md index f192fd1b..a732aa88 100644 --- a/docs/administration/harper-studio/manage-applications.md +++ b/docs/administration/harper-studio/manage-applications.md @@ -33,11 +33,11 @@ Accessing your application endpoints varies with which type of endpoint you're c Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`. -* **Standard REST Endpoints**\ Standard REST endpoints are defined via the `@export` directive to tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest.md), we could send a `GET` (or since this is a GET, we could post the URL in our browser) to `https://localhost:9926/Dog/1`. -* **Augmented REST Endpoints**\ Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`. -* **Fastify Routes**\ If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes.md). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost/application-template/getAll`. +- **Standard REST Endpoints**\ Standard REST endpoints are defined via the `@export` directive to tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest.md), we could send a `GET` (or since this is a GET, we could post the URL in our browser) to `https://localhost:9926/Dog/1`. +- **Augmented REST Endpoints**\ Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`. +- **Fastify Routes**\ If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes.md). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost:9926/application-template/getAll`. A combined curl sketch of these three patterns follows below.
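+
+The same three patterns can also be exercised from the command line while developing. This is a minimal curl sketch against the example instance above (`https://localhost:9926`); the `-k` flag is an assumption for local development with a self-signed certificate, and `Dog`, `DogWithHumanAge`, and `application-template` are the example names from the linked documentation:
+
+```bash
+# Standard REST endpoint: fetch the record with ID 1 from the exported Dog table
+curl -k https://localhost:9926/Dog/1
+
+# Augmented REST endpoint: same URL pattern, custom logic runs server-side
+curl -k https://localhost:9926/DogWithHumanAge/1
+
+# Fastify route from the application-template project
+curl -k https://localhost:9926/application-template/getAll
+```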
## Creating a New Application diff --git a/docs/administration/harper-studio/manage-databases-browse-data.md b/docs/administration/harper-studio/manage-databases-browse-data.md index 6d749fbf..08641097 100644 --- a/docs/administration/harper-studio/manage-databases-browse-data.md +++ b/docs/administration/harper-studio/manage-databases-browse-data.md @@ -2,10 +2,10 @@ Manage instance databases/tables and browse data in tabular format with the following instructions: -1) Navigate to the Harper Studio Organizations page. -2) Click the appropriate organization that the instance belongs to. -3) Select your desired instance. -4) Click **browse** in the instance control bar. +1. Navigate to the Harper Studio Organizations page. +2. Click the appropriate organization that the instance belongs to. +3. Select your desired instance. +4. Click **browse** in the instance control bar. Once on the instance browse page you can view data, manage databases and tables, add new data, and more. @@ -13,95 +13,90 @@ Once on the instance browse page you can view data, manage databases and tables, #### Create a Database -1) Click the plus icon at the top right of the databases section. -2) Enter the database name. -3) Click the green check mark. - +1. Click the plus icon at the top right of the databases section. +2. Enter the database name. +3. Click the green check mark. #### Delete a Database Deleting a database is permanent and irreversible. Deleting a database removes all tables and data within it. -1) Click the minus icon at the top right of the databases section. -2) Identify the appropriate database to delete and click the red minus sign in the same row. -3) Click the red check mark to confirm deletion. - +1. Click the minus icon at the top right of the databases section. +2. Identify the appropriate database to delete and click the red minus sign in the same row. +3. Click the red check mark to confirm deletion. #### Create a Table -1) Select the desired database from the databases section. -2) Click the plus icon at the top right of the tables section. -3) Enter the table name. -4) Enter the primary key. - - *The primary key is also often referred to as the hash attribute in the studio, and it defines the unique identifier for each row in your table.* -5) Click the green check mark. +1. Select the desired database from the databases section. +2. Click the plus icon at the top right of the tables section. +3. Enter the table name. +4. Enter the primary key. + _The primary key is also often referred to as the hash attribute in the studio, and it defines the unique identifier for each row in your table._ + +5. Click the green check mark. #### Delete a Table + Deleting a table is permanent and irreversible. Deleting a table removes all data within it. -1) Select the desired database from the databases section. -2) Click the minus icon at the top right of the tables section. -3) Identify the appropriate table to delete and click the red minus sign in the same row. -4) Click the red check mark to confirm deletion. +1. Select the desired database from the databases section. +2. Click the minus icon at the top right of the tables section. +3. Identify the appropriate table to delete and click the red minus sign in the same row. +4. Click the red check mark to confirm deletion. ## Manage Table Data The following section assumes you have selected the appropriate table from the database/table browser. - - #### Filter Table Data -1) Click the magnifying glass icon at the top right of the table browser. 
-2) This expands the search filters. -3) The results will be filtered appropriately. - +1. Click the magnifying glass icon at the top right of the table browser. +2. This expands the search filters; enter your desired filter criteria. +3. The results will be filtered appropriately. #### Load CSV Data -1) Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. -2) To import a CSV by URL: - 1) Enter the URL in the **CSV file URL** textbox. - 2) Click **Import From URL**. - 3) The CSV will load, and you will be redirected back to browse table data. -3) To upload a CSV file: - 1) Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). - 2) Navigate to your desired CSV file and select it. - 3) Click **Insert X Records**, where X is the number of records in your CSV. - 4) The CSV will load, and you will be redirected back to browse table data. - +1. Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. +2. To import a CSV by URL: + 1. Enter the URL in the **CSV file URL** textbox. + 2. Click **Import From URL**. + 3. The CSV will load, and you will be redirected back to browse table data. +3. To upload a CSV file: + 1. Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). + 2. Navigate to your desired CSV file and select it. + 3. Click **Insert X Records**, where X is the number of records in your CSV. + 4. The CSV will load, and you will be redirected back to browse table data. #### Add a Record -1) Click the plus icon at the top right of the table browser. -2) The Studio will pre-populate existing table attributes in JSON format. +1. Click the plus icon at the top right of the table browser. +2. The Studio will pre-populate existing table attributes in JSON format. - *The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction.* -3) Enter values to be added to the record. + _The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction._ - *You may add new attributes to the JSON; they will be reflexively added to the table.* -4) Click the **Add New** button. +3. Enter values to be added to the record. + _You may add new attributes to the JSON; they will be reflexively added to the table._ -#### Edit a Record +4. Click the **Add New** button. -1) Click the record/row you would like to edit. -2) Modify the desired values. +#### Edit a Record - *You may add new attributes to the JSON; they will be reflexively added to the table.* +1. Click the record/row you would like to edit. +2. Modify the desired values. -3) Click the **save icon**. + _You may add new attributes to the JSON; they will be reflexively added to the table._ +3. Click the **save icon**. #### Delete a Record Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted. -1) Click the record/row you would like to delete. -2) Click the **delete icon**. -3) Confirm deletion by clicking the **check icon**. +1.
Click the record/row you would like to delete. +2. Click the **delete icon**. +3. Confirm deletion by clicking the **check icon**. ## Browse Table Data @@ -111,18 +106,14 @@ The following section assumes you have selected the appropriate table from the d The first page of table data is automatically loaded on table selection. Paging controls are at the bottom of the table. Here you can: -* Page left and right using the arrows. -* Type in the desired page. -* Change the page size (the amount of records displayed in the table). - +- Page left and right using the arrows. +- Type in the desired page. +- Change the page size (the amount of records displayed in the table). #### Refresh Table Data Click the refresh icon at the top right of the table browser. - - #### Automatically Refresh Table Data Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. - diff --git a/docs/administration/harper-studio/manage-instance-roles.md b/docs/administration/harper-studio/manage-instance-roles.md index f44fec2c..552d5eb5 100644 --- a/docs/administration/harper-studio/manage-instance-roles.md +++ b/docs/administration/harper-studio/manage-instance-roles.md @@ -4,69 +4,70 @@ Harper users and roles can be managed directly through the Harper Studio. It is Instance role configuration is handled through the **roles** page of the Harper Studio, accessed with the following instructions: -1) Navigate to the Harper Studio Organizations page. +1. Navigate to the Harper Studio Organizations page. -2) Click the appropriate organization that the instance belongs to. +2. Click the appropriate organization that the instance belongs to. -3) Select your desired instance. +3. Select your desired instance. -4) Click **roles** in the instance control bar. +4. Click **roles** in the instance control bar. -*Note, the **roles** page will only be available to super users.* +_Note, the **roles** page will only be available to super users._ +The _roles management_ screen consists of the following panels: +- **super users** -The *roles management* screen consists of the following panels: + Displays all super user roles for this instance. -* **super users** +- **cluster users** - Displays all super user roles for this instance. -* **cluster users** + Displays all cluster user roles for this instance. - Displays all cluster user roles for this instance. -* **standard roles** +- **standard roles** - Displays all standard roles for this instance. -* **role permission editing** + Displays all standard roles for this instance. - Once a role is selected for editing, permissions will be displayed here in JSON format. +- **role permission editing** -*Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`.* + Once a role is selected for editing, permissions will be displayed here in JSON format. + +_Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`._ ## Role Management #### Create a Role -1) Click the plus icon at the top right of the appropriate role section. +1. Click the plus icon at the top right of the appropriate role section. -2) Enter the role name. +2. Enter the role name. -3) Click the green check mark. +3. Click the green check mark. -4) Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. 
+4. Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. -5) Configure the role permissions in the role permission editing panel. - *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* -6) Click **Update Role Permissions**. +5. Configure the role permissions in the role permission editing panel. + _Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel._ +6. Click **Update Role Permissions**. #### Modify a Role -1) Click the appropriate role from the appropriate role section. +1. Click the appropriate role from the appropriate role section. -2) Modify the role permissions in the role permission editing panel. +2. Modify the role permissions in the role permission editing panel. - *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + _Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel._ -3) Click **Update Role Permissions**. +3. Click **Update Role Permissions**. #### Delete a Role Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it. -1) Click the minus icon at the top right of the roles section. +1. Click the minus icon at the top right of the roles section. -2) Identify the appropriate role to delete and click the red minus sign in the same row. +2. Identify the appropriate role to delete and click the red minus sign in the same row. -3) Click the red check mark to confirm deletion. \ No newline at end of file +3. Click the red check mark to confirm deletion. diff --git a/docs/administration/harper-studio/manage-instance-users.md b/docs/administration/harper-studio/manage-instance-users.md index d416a2b2..0b2ab43b 100644 --- a/docs/administration/harper-studio/manage-instance-users.md +++ b/docs/administration/harper-studio/manage-instance-users.md @@ -4,54 +4,46 @@ Harper users and roles can be managed directly through the Harper Studio. It is Instance user configuration is handled through the **users** page of the Harper Studio, accessed with the following instructions: -1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2) Click the appropriate organization that the instance belongs to. +2. Click the appropriate organization that the instance belongs to. -3) Select your desired instance. +3. Select your desired instance. -4) Click **users** in the instance control bar. +4. Click **users** in the instance control bar. -*Note, the **users** page will only be available to super users.* +_Note, the **users** page will only be available to super users._ ## Add a User Harper instance users can be added with the following instructions. -1) In the **add user** panel on the left enter: - - * New user username. - - * New user password. - - * Select a role. - - *Learn more about role management here: [Manage Instance Roles](manage-instance-roles.md).* - -2) Click **Add User**. - +1. In the **add user** panel on the left enter: + - New user username. + - New user password. + - Select a role.
+ + _Learn more about role management here: [Manage Instance Roles](manage-instance-roles.md)._ + +2. Click **Add User**. + ## Edit a User Harper instance users can be modified with the following instructions. -1) In the **existing users** panel, click the row of the user you would like to edit. +1. In the **existing users** panel, click the row of the user you would like to edit. + +2. To change a user’s password: + 1. In the **Change user password** section, enter the new password. + 2. Click **Update Password**. -2) To change a user’s password: +3. To change a user’s role: + 1. In the **Change user role** section, select the new role. + 2. Click **Update Role**. - 1) In the **Change user password** section, enter the new password. - - 2) Click **Update Password**. - -3) To change a user’s role: +4. To delete a user: + 1. In the **Delete User** section, type the username into the textbox. - 1) In the **Change user role** section, select the new role. - - 2) Click **Update Role**. - -4) To delete a user: + _This is done for confirmation purposes._ - 1) In the **Delete User** section, type the username into the textbox. - - *This is done for confirmation purposes.* - - 2) Click **Delete User**. \ No newline at end of file + 2. Click **Delete User**. diff --git a/docs/administration/harper-studio/manage-replication.md b/docs/administration/harper-studio/manage-replication.md index e72fc537..c99cded6 100644 --- a/docs/administration/harper-studio/manage-replication.md +++ b/docs/administration/harper-studio/manage-replication.md @@ -2,65 +2,66 @@ Harper instance clustering and replication can be configured directly through the Harper Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/README.md) first to gain a strong understanding of Harper clustering behavior. - - All clustering configuration is handled through the **replication** page of the Harper Studio, accessed with the following instructions: -1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2) Click the appropriate organization that the instance belongs to. +2. Click the appropriate organization that the instance belongs to. -3) Select your desired instance. +3. Select your desired instance. -4) Click **replication** in the instance control bar. +4. Click **replication** in the instance control bar. Note, the **replication** page will only be available to super users. --- + ## Initial Configuration Harper instances do not have clustering configured by default. The Harper Studio will walk you through the initial configuration. Upon entering the **replication** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user.md) document before proceeding. -1) Enter Cluster User username. (Defaults to `cluster_user`). -2) Enter Cluster Password. -3) Review and/or Set Cluster Node Name. -4) Click **Enable Clustering**. - +1. Enter Cluster User username. (Defaults to `cluster_user`). +2. Enter Cluster Password. +3. Review and/or Set Cluster Node Name. +4. Click **Enable Clustering**. + At this point the Studio will restart your Harper Instance, required for the configuration changes to take effect. 
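The Studio performs this restart for you. If you manage instances outside the Studio, the same restart can be issued through the operations API. A minimal sketch, assuming super user credentials on the instance:

```json
{
	"operation": "restart"
}
```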
---

## Manage Clustering
+
Once initial clustering configuration is completed you are presented with a clustering management screen with the following properties:

-* **connected instances**
+- **connected instances**
+
+  Displays all instances within the Studio Organization that this instance manages a connection with.

-   Displays all instances within the Studio Organization that this instance manages a connection with.
+- **unconnected instances**

-* **unconnected instances**
+  Displays all instances within the Studio Organization that this instance does not manage a connection with.

-   Displays all instances within the Studio Organization that this instance does not manage a connection with.
+- **unregistered instances**

-* **unregistered instances**
+  Displays all instances outside the Studio Organization that this instance manages a connection with.

-   Displays all instances outside the Studio Organization that this instance manages a connection with.
+- **manage clustering**

-* **manage clustering**
+  Once instances are connected, this will display clustering management options for all connected instances and all databases and tables.

-   Once instances are connected, this will display clustering management options for all connected instances and all databases and tables.
-
---

## Connect an Instance

Harper Instances can be clustered together with the following instructions.

-1) Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both.
+1. Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both.

-2) Identify the instance you would like to connect from the **unconnected instances** panel.
+2. Identify the instance you would like to connect from the **unconnected instances** panel.

-3) Click the plus icon next the appropriate instance.
+3. Click the plus icon next to the appropriate instance.

-4) If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear, click it to learn more about what could be causing the issue.
+4. If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue.

---

@@ -68,9 +69,9 @@ Harper Instances can be clustered together with the following instructions.

Harper Instances can be disconnected with the following instructions.

-1) Identify the instance you would like to disconnect from the **connected instances** panel.
+1. Identify the instance you would like to disconnect from the **connected instances** panel.

-2) Click the minus icon next the appropriate instance.
+2. Click the minus icon next to the appropriate instance.

---

@@ -78,8 +79,8 @@ Harper Instances can be disconnected with the following instructions.

Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions:

-1) Identify the instance, database, and table for replication to be configured.
+1. 
Identify the instance, database, and table for replication to be configured. -2) For publish, click the toggle switch in the **publish** column. +2. For publish, click the toggle switch in the **publish** column. -3) For subscribe, click the toggle switch in the **subscribe** column. \ No newline at end of file +3. For subscribe, click the toggle switch in the **subscribe** column. diff --git a/docs/administration/harper-studio/organizations.md b/docs/administration/harper-studio/organizations.md index 12e24230..c273090b 100644 --- a/docs/administration/harper-studio/organizations.md +++ b/docs/administration/harper-studio/organizations.md @@ -1,101 +1,105 @@ # Organizations -Harper Studio organizations provide the ability to group Harper Cloud Instances. Organization behavior is as follows: -* Billing occurs at the organization level to a single credit card. -* Organizations retain their own unique Harper Cloud subdomain. -* Cloud instances reside within an organization. -* Studio users can be invited to organizations to share instances. +Harper Studio organizations provide the ability to group Harper Cloud Instances. Organization behavior is as follows: +- Billing occurs at the organization level to a single credit card. +- Organizations retain their own unique Harper Cloud subdomain. +- Cloud instances reside within an organization. +- Studio users can be invited to organizations to share instances. An organization is automatically created for you when you sign up for Harper Studio. If you only have one organization, the Studio will automatically bring you to your organization’s page. --- ## List Organizations + A summary view of all organizations your user belongs to can be viewed on the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the Harper Studio. ## Create a New Organization + A new organization can be created as follows: -1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. -2) Click the **Create a New Organization** card. -3) Fill out new organization details - * Enter Organization Name - *This is used for descriptive purposes only.* - * Enter Organization Subdomain - *Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* -4) Click Create Organization. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2. Click the **Create a New Organization** card. +3. Fill out new organization details + - Enter Organization Name + _This is used for descriptive purposes only._ + - Enter Organization Subdomain + _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._ +4. Click Create Organization. ## Delete an Organization + An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows: -1) Navigate to the Harper Studio Organizations page. -2) Identify the proper organization card and click the trash can icon. -3) Enter the organization name into the text box. +1. Navigate to the Harper Studio Organizations page. +2. Identify the proper organization card and click the trash can icon. +3. 
Enter the organization name into the text box.

-   *This is done for confirmation purposes to ensure you do not accidentally delete an organization.*
-4) Click the **Do It** button.
+   _This is done for confirmation purposes to ensure you do not accidentally delete an organization._

-## Manage Users
-Harper Studio organization owners can manage users including inviting new users, removing users, and toggling ownership.
+4. Click the **Do It** button.

+## Manage Users

+Harper Studio organization owners can manage users including inviting new users, removing users, and toggling ownership.

#### Inviting a User
+
A new user can be invited to an organization as follows:

-1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
-2) Click the appropriate organization card.
-3) Click **users** at the top of the screen.
-4) In the **add user** box, enter the new user’s email address.
-5) Click **Add User**.
+1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2. Click the appropriate organization card.
+3. Click **users** at the top of the screen.
+4. In the **add user** box, enter the new user’s email address.
+5. Click **Add User**.

Users may or may not already be Harper Studio users when adding them to an organization. If the Harper Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a Harper Studio account, they will receive an email welcoming them to Harper Studio.

---

#### Toggle a User’s Organization Owner Status
+
Organization owners have full access to the organization including the ability to manage organization users, create, modify, and delete instances, and delete the organization. Users must have accepted their invitation prior to being promoted to an owner. A user’s organization owner status can be toggled as follows:

-1) Navigate to the Harper Studio Organizations page.
-2) Click the appropriate organization card.
-3) Click **users** at the top of the screen.
-4) Click the appropriate user from the **existing users** section.
-5) Toggle the **Is Owner** switch to the desired status.
+1. Navigate to the Harper Studio Organizations page.
+2. Click the appropriate organization card.
+3. Click **users** at the top of the screen.
+4. Click the appropriate user from the **existing users** section.
+5. Toggle the **Is Owner** switch to the desired status.
+
---

#### Remove a User from an Organization
+
Users may be removed from an organization at any time. Removing a user from an organization will not delete their Harper Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows:

-1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
-2) Click the appropriate organization card.
-3) Click **users** at the top of the screen.
-4) Click the appropriate user from the **existing users** section.
-5) Type **DELETE** in the text box in the **Delete User** row.
+1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2. Click the appropriate organization card.
+3. Click **users** at the top of the screen.
+4. Click the appropriate user from the **existing users** section.
+5. Type **DELETE** in the text box in the **Delete User** row.
+
+
+   _This is done for confirmation purposes to ensure you do not accidentally delete a user._

-   *This is done for confirmation purposes to ensure you do not accidentally delete a user.*
-6) Click **Delete User**.
+6. Click **Delete User**.

## Manage Billing

Billing is configured per organization and will be billed to the stored credit card at appropriate intervals (monthly or annually depending on the registered instance). Billing settings can be configured as follows:

-1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
-2) Click the appropriate organization card.
-3) Click **billing** at the top of the screen.
+1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2. Click the appropriate organization card.
+3. Click **billing** at the top of the screen.

Here organization owners can view invoices, manage coupons, and manage the associated credit card.

-
-
-*Harper billing and payments are managed via Stripe.*
-
-
+_Harper billing and payments are managed via Stripe._

### Add a Coupon

Coupons are applicable towards any paid tier or enterprise instance and you can change your subscription at any time. Coupons can be added to your Organization as follows:

-1) In the coupons panel of the **billing** page, enter your coupon code.
-2) Click **Add Coupon**.
-3) The coupon will then be available and displayed in the coupons panel. \ No newline at end of file
+1. In the coupons panel of the **billing** page, enter your coupon code.
+2. Click **Add Coupon**.
+3. The coupon will then be available and displayed in the coupons panel.
diff --git a/docs/administration/harper-studio/query-instance-data.md b/docs/administration/harper-studio/query-instance-data.md
index 2af3bed0..0db8a346 100644
--- a/docs/administration/harper-studio/query-instance-data.md
+++ b/docs/administration/harper-studio/query-instance-data.md
@@ -2,14 +2,14 @@

SQL queries can be executed directly through the Harper Studio with the following instructions:

-1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.
-2) Click the appropriate organization that the instance belongs to.
-3) Select your desired instance.
-4) Click **query** in the instance control bar.
-5) Enter your SQL query in the SQL query window.
-6) Click **Execute**.
+1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.
+2. Click the appropriate organization that the instance belongs to.
+3. Select your desired instance.
+4. Click **query** in the instance control bar.
+5. Enter your SQL query in the SQL query window.
+6. Click **Execute**.

-*Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser.*
+_Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser._

## Browse Query Results Set

@@ -17,9 +17,9 @@ SQL queries can be executed directly through the Harper Studio with the followin

The first page of results set data is automatically loaded on query execution. Paging controls are at the bottom of the table. Here you can:

-* Page left and right using the arrows.
-* Type in the desired page.
-* Change the page size (the amount of records displayed in the table).
+- Page left and right using the arrows.
+- Type in the desired page. +- Change the page size (the amount of records displayed in the table). #### Refresh Results Set @@ -33,12 +33,11 @@ Toggle the auto switch at the top right of the results set table. The results se Query history is stored in your local browser cache. Executed queries are listed with the most recent at the top in the **query history** section. - #### Rerun Previous Query -* Identify the query from the **query history** list. -* Click the appropriate query. It will be loaded into the **sql query** input box. -* Click **Execute**. +- Identify the query from the **query history** list. +- Click the appropriate query. It will be loaded into the **sql query** input box. +- Click **Execute**. #### Clear Query History @@ -46,4 +45,4 @@ Click the trash can icon at the top right of the **query history** section. ## Create Charts -The Harper Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information. \ No newline at end of file +The Harper Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information. diff --git a/docs/administration/jobs.md b/docs/administration/jobs.md index d30f57e7..227c7886 100644 --- a/docs/administration/jobs.md +++ b/docs/administration/jobs.md @@ -6,7 +6,7 @@ Harper Jobs are asynchronous tasks performed by the Operations API. Jobs uses an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so that job is started and the id is provided to check on the status. -The job status can be **COMPLETE** or **IN\_PROGRESS**. +The job status can be **COMPLETE** or **IN_PROGRESS**. ## Example Job Operations diff --git a/docs/administration/logging/README.md b/docs/administration/logging/README.md index bebaa706..83226b1d 100644 --- a/docs/administration/logging/README.md +++ b/docs/administration/logging/README.md @@ -2,6 +2,6 @@ Harper provides many different logging options for various features and functionality. -* [Standard Logging](logging.md): Harper maintains a log of events that take place throughout operation. -* [Audit Logging](audit-logging.md): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. -* [Transaction Logging](transaction-logging.md): Harper stores a verbose history of all transactions logged for specified database tables, including original data records. +- [Standard Logging](logging.md): Harper maintains a log of events that take place throughout operation. +- [Audit Logging](audit-logging.md): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +- [Transaction Logging](transaction-logging.md): Harper stores a verbose history of all transactions logged for specified database tables, including original data records. diff --git a/docs/administration/logging/audit-logging.md b/docs/administration/logging/audit-logging.md index 82f745f4..658a31b9 100644 --- a/docs/administration/logging/audit-logging.md +++ b/docs/administration/logging/audit-logging.md @@ -8,7 +8,7 @@ Audit log is enabled by default. 
To disable the audit log, set `logging.auditLog ### Audit Log Operations -#### read\_audit\_log +#### read_audit_log The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples found in the [Harper API documentation](../../developers/operations-api/logs.md). @@ -16,116 +16,107 @@ The `read_audit_log` operation is flexible, enabling users to query with many pa ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog", - "search_type": "timestamp", - "search_values": [ - 1660585740558 - ] + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [1660585740558] } ``` There are three outcomes using timestamp. -* `"search_values": []` - All records returned for specified table -* `"search_values": [1660585740558]` - All records after provided timestamp -* `"search_values": [1660585740558, 1760585759710]` - Records "from" and "to" provided timestamp +- `"search_values": []` - All records returned for specified table +- `"search_values": [1660585740558]` - All records after provided timestamp +- `"search_values": [1660585740558, 1760585759710]` - Records "from" and "to" provided timestamp -*** +--- **Search by Username** ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog", - "search_type": "username", - "search_values": [ - "admin" - ] + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": ["admin"] } ``` The above example will return all records whose `username` is "admin." -*** +--- **Search by Primary Key** ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog", - "search_type": "hash_value", - "search_values": [ - 318 - ] + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [318] } ``` The above example will return all records whose primary key (`hash_value`) is 318. -*** +--- -#### read\_audit\_log Response +#### read_audit_log Response The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation gives you the `original_records`. 
```json { - "operation": "update", - "user_name": "HDB_ADMIN", - "timestamp": 1607035559122.277, - "hash_values": [ - 1, - 2 - ], - "records": [ - { - "id": 1, - "breed": "Muttzilla", - "age": 6, - "__updatedtime__": 1607035559122 - }, - { - "id": 2, - "age": 7, - "__updatedtime__": 1607035559121 - } - ], - "original_records": [ - { - "__createdtime__": 1607035556801, - "__updatedtime__": 1607035556801, - "age": 5, - "breed": "Mutt", - "id": 2, - "name": "Penny" - }, - { - "__createdtime__": 1607035556801, - "__updatedtime__": 1607035556801, - "age": 5, - "breed": "Mutt", - "id": 1, - "name": "Harper" - } - ] + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [1, 2], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] } ``` -#### delete\_audit\_logs\_before +#### delete_audit_logs_before Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. ```json { - "operation": "delete_audit_logs_before", - "schema": "dev", - "table": "cat", - "timestamp": 1598290282817 + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 } ``` diff --git a/docs/administration/logging/logging.md b/docs/administration/logging/logging.md index 7e76b64d..2338b761 100644 --- a/docs/administration/logging/logging.md +++ b/docs/administration/logging/logging.md @@ -18,15 +18,15 @@ For example, a typical log entry looks like: The components of a log entry are: -* timestamp - This is the date/time stamp when the event occurred -* level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels in order of least urgent (and more verbose) are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. -* thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are: - * main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads - * http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. - * Clustering\* - These are threads and processes that handle replication. - * job - These are job threads that have been started to handle operations that are executed in a separate job thread. -* tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. -* message - This is the main message that was reported. +- timestamp - This is the date/time stamp when the event occurred +- level - This is an associated log level that gives a rough guide to the importance and urgency of the message. 
The available log levels, in order from least urgent (and most verbose), are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`.
+- thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are:
+  - main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads
+  - http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions.
+  - Clustering\* - These are threads and processes that handle replication.
+  - job - These are job threads that have been started to handle operations that are executed in a separate job thread.
+- tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags.
+- message - This is the main message that was reported.

We try to keep logging to a minimum by default; to do this, the default log level is `error`. If you require more information from the logs, turning the log level down (toward the more verbose levels) will provide that.

@@ -50,12 +50,12 @@ To access specific logs you may query the Harper API. Logs can be queried using

```json
{
-    "operation": "read_log",
-    "start": 0,
-    "limit": 1000,
-    "level": "error",
-    "from": "2021-01-25T22:05:27.464+0000",
-    "until": "2021-01-25T23:05:27.464+0000",
-    "order": "desc"
+	"operation": "read_log",
+	"start": 0,
+	"limit": 1000,
+	"level": "error",
+	"from": "2021-01-25T22:05:27.464+0000",
+	"until": "2021-01-25T23:05:27.464+0000",
+	"order": "desc"
}
```
diff --git a/docs/administration/logging/transaction-logging.md b/docs/administration/logging/transaction-logging.md
index 28a1290c..0a9ae3b8 100644
--- a/docs/administration/logging/transaction-logging.md
+++ b/docs/administration/logging/transaction-logging.md
@@ -10,71 +10,71 @@ If you would like to use the transaction log, but have not set up clustering yet

## Transaction Log Operations

-### read\_transaction\_log
+### read_transaction_log

The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided.
```json { - "operation": "read_transaction_log", - "schema": "dev", - "table": "dog", - "from": 1598290235769, - "to": 1660249020865, - "limit": 2 + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 } ``` _See example response below._ -### read\_transaction\_log Response +### read_transaction_log Response ```json [ - { - "operation": "insert", - "user": "admin", - "timestamp": 1660165619736, - "records": [ - { - "id": 1, - "dog_name": "Penny", - "owner_name": "Kyle", - "breed_id": 154, - "age": 7, - "weight_lbs": 38, - "__updatedtime__": 1660165619688, - "__createdtime__": 1660165619688 - } - ] - }, - { - "operation": "update", - "user": "admin", - "timestamp": 1660165620040, - "records": [ - { - "id": 1, - "dog_name": "Penny B", - "__updatedtime__": 1660165620036 - } - ] - } + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } ] ``` _See example request above._ -### delete\_transaction\_logs\_before +### delete_transaction_logs_before The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided. ```json { - "operation": "delete_transaction_logs_before", - "schema": "dev", - "table": "dog", - "timestamp": 1598290282817 + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 } ``` diff --git a/docs/custom-functions/README.md b/docs/custom-functions/README.md index 224ffc7e..687dcabb 100644 --- a/docs/custom-functions/README.md +++ b/docs/custom-functions/README.md @@ -2,23 +2,24 @@ Custom functions are a key part of building a complete Harper application. It is highly recommended that you use Custom Functions as the primary mechanism for your application to access your Harper database. Using Custom Functions gives you complete control over the accessible endpoints, how users are authenticated and authorized, what data is accessed from the database, and how it is aggregated and returned to users. 
-* Add your own API endpoints to a standalone API server inside Harper +- Add your own API endpoints to a standalone API server inside Harper -* Use Harper Core methods to interact with your data at lightning speed +- Use Harper Core methods to interact with your data at lightning speed -* Custom Functions are powered by Fastify, so they’re extremely flexible +- Custom Functions are powered by Fastify, so they’re extremely flexible -* Manage in Harper Studio, or use your own IDE and Version Management System +- Manage in Harper Studio, or use your own IDE and Version Management System -* Distribute your Custom Functions to all your Harper instances with a single click +- Distribute your Custom Functions to all your Harper instances with a single click --- -* [Requirements and Definitions](requirements-definitions.md) -* [Create A Project](create-project.md) +- [Requirements and Definitions](requirements-definitions.md) -* [Define Routes](define-routes.md) +- [Create A Project](create-project.md) -* [Define Helpers](define-helpers.md) +- [Define Routes](define-routes.md) -* [Host a Static UI](host-static.md) \ No newline at end of file +- [Define Helpers](define-helpers.md) + +- [Host a Static UI](host-static.md) diff --git a/docs/custom-functions/create-project.md b/docs/custom-functions/create-project.md index 752e625c..263890be 100644 --- a/docs/custom-functions/create-project.md +++ b/docs/custom-functions/create-project.md @@ -4,20 +4,20 @@ To create a project using our web-based GUI, Harper Studio, checkout out how to Otherwise, to create a project, you have the following options: -1. **Use the add\_custom\_function\_project operation** +1. **Use the add_custom_function_project operation** This operation creates a new project folder, and populates it with templates for the routes, helpers, and static subfolders. ```json { - "operation": "add_custom_function_project", - "project": "dogs" + "operation": "add_custom_function_project", + "project": "dogs" } ``` 2. **Clone our public gitHub project template** - _This requires a local installation. Remove the .git directory for a clean slate of git history._ + _This requires a local installation. Remove the .git directory for a clean slate of git history._ ```bash > git clone https://github.com/HarperDB/harperdb-custom-functions-template.git ~/hdb/custom_functions/dogs @@ -25,7 +25,7 @@ Otherwise, to create a project, you have the following options: 3. **Create a project folder in your Custom Functions root directory** and **initialize** - _This requires a local installation._ + _This requires a local installation._ ```bash > mkdir ~/hdb/custom_functions/dogs @@ -39,6 +39,6 @@ Otherwise, to create a project, you have the following options: Custom function projects can be structured and managed like normal Node.js projects. You can include external dependencies, include them in your route and helper files, and manage your revisions without changing your development tooling or pipeline. -* To initialize your project to use npm packages, use the terminal to execute `npm init` from the root of your project folder. +- To initialize your project to use npm packages, use the terminal to execute `npm init` from the root of your project folder. -* To implement version control using git, use the terminal to execute `git init` from the root of your project folder. \ No newline at end of file +- To implement version control using git, use the terminal to execute `git init` from the root of your project folder. 
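Putting these options together, a from-scratch local setup might look like the following. This is a sketch that assumes the default Custom Functions root of `~/hdb/custom_functions` and a project named `dogs`, matching the examples above:

```bash
# create the project folder inside the Custom Functions root
mkdir ~/hdb/custom_functions/dogs
cd ~/hdb/custom_functions/dogs

# optionally enable npm packages (answer the prompts) and git version control
npm init
git init
```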
diff --git a/docs/custom-functions/host-static.md b/docs/custom-functions/host-static.md index c90b85fa..0cc9a591 100644 --- a/docs/custom-functions/host-static.md +++ b/docs/custom-functions/host-static.md @@ -10,8 +10,8 @@ For further information on how to send specific files see the [@fastify/static]( ```javascript module.exports = async (server, { hdbCore, logger }) => { - server.register(require('@fastify/static'), { - root: path.join(__dirname, 'public'), - }) + server.register(require('@fastify/static'), { + root: path.join(__dirname, 'public'), + }); }; ``` diff --git a/docs/custom-functions/requirements-definitions.md b/docs/custom-functions/requirements-definitions.md index 49dfb2cb..cd973906 100644 --- a/docs/custom-functions/requirements-definitions.md +++ b/docs/custom-functions/requirements-definitions.md @@ -1,8 +1,10 @@ # Requirements And Definitions + Before you get started with Custom Functions, here’s a primer on the basic configuration and the structure of a Custom Functions Project. ## Configuration -Custom Functions are configured in the harperdb-config.yaml file located in the operations API root directory (by default this is a directory named `hdb` located in the home directory of the current user). Below is a view of the Custom Functions' section of the config YAML file, plus descriptions of important Custom Functions settings. + +Custom Functions are configured in the harperdb-config.yaml file located in the operations API root directory (by default this is a directory named `hdb` located in the home directory of the current user). Below is a view of the Custom Functions' section of the config YAML file, plus descriptions of important Custom Functions settings. ```yaml customFunctions: @@ -24,22 +26,22 @@ customFunctions: privateKey: ~/hdb/keys/privateKey.pem ``` -* **`enabled`** +- **`enabled`** A boolean value that tells Harper to start the Custom Functions server. Set it to **true** to enable custom functions and **false** to disable. `enabled` is `true` by default. -* **`network.port`** +- **`network.port`** This is the port Harper will use to start the HTTP server dedicated to serving your Custom Functions’ routes. -* **`root`** +- **`root`** This is the root directory where your Custom Functions projects and their files will live. By default, it’s in your \, but you can locate it anywhere--in a developer folder next to your other development projects, for example. _Please visit our [configuration docs](../configuration.md) for a more comprehensive look at these settings._ ## Project Structure -**project folder** -The name of the folder that holds your project files serves as the root prefix for all the routes you create. All routes created in the **dogs** project folder will have a URL like this: **https://my-server-url.com:9926/dogs/my/route**. As such, it’s important that any project folders you create avoid any characters that aren’t URL-friendly. You should avoid URL delimiters in your folder names. +**project folder** +The name of the folder that holds your project files serves as the root prefix for all the routes you create. All routes created in the **dogs** project folder will have a URL like this: **https://my-server-url.com:9926/dogs/my/route**. As such, it’s important that any project folders you create avoid any characters that aren’t URL-friendly. You should avoid URL delimiters in your folder names. 
**/routes folder**

@@ -47,13 +49,13 @@ By default, files in the **routes** folder define the requests that your Custom

```javascript
module.exports = async (server, { hdbCore, logger }) => {
-  server.route({
-    url: '/',
-    method: 'POST',
-    preValidation: hdbCore.preValidation,
-    handler: hdbCore.request,
-  });
-}
+	server.route({
+		url: '/',
+		method: 'POST',
+		preValidation: hdbCore.preValidation,
+		handler: hdbCore.request,
+	});
+};
```

**/helpers folder**

@@ -61,7 +63,7 @@ module.exports = async (server, { hdbCore, logger }) => {

These files are JavaScript modules that you can use in your handlers, or for custom `preValidation` hooks. Examples include calls to third party Authentication services, filters for results of calls to Harper, and custom error responses. As modules, you can use standard import and export functionality.

```javascript
-"use strict";
+'use strict';

const dbFilter = (databaseResultsArray) => databaseResultsArray.filter((result) => result.showToApi === true);

@@ -70,4 +72,4 @@

module.exports = dbFilter;
```

**/static folder**

-If you’d like to serve your visitors a static website, you can place the html and supporting files into a directory called **static**. The directory must have an **index.html** file, and can have as many supporting resources as are necessary in whatever subfolder structure you prefer within that **static** directory. \ No newline at end of file
+If you’d like to serve your visitors a static website, you can place the html and supporting files into a directory called **static**. The directory must have an **index.html** file, and can have as many supporting resources as are necessary in whatever subfolder structure you prefer within that **static** directory.
diff --git a/docs/custom-functions/restarting-server.md b/docs/custom-functions/restarting-server.md
index 9d461458..4362efd5 100644
--- a/docs/custom-functions/restarting-server.md
+++ b/docs/custom-functions/restarting-server.md
@@ -4,11 +4,9 @@ One way to manage Custom Functions is through [Harper Studio](../harper-studio/R

For any changes made to your routes, helpers, or projects, you’ll need to restart the Custom Functions server to see them take effect. Harper Studio does this automatically whenever you create or delete a project, or add, edit, or delete a route or helper. If you need to start the Custom Functions server yourself, you can use the following operation to do so:

-
-
```json
{
-    "operation": "restart_service",
-    "service": "custom_functions"
+	"operation": "restart_service",
+	"service": "custom_functions"
}
-``` \ No newline at end of file
+```
diff --git a/docs/custom-functions/templates.md b/docs/custom-functions/templates.md
index 08603195..4cfbd85c 100644
--- a/docs/custom-functions/templates.md
+++ b/docs/custom-functions/templates.md
@@ -1,3 +1,3 @@
# Templates

-Check out our always-expanding library of templates in our open-source [Harper-Add-Ons GitHub repo](https://github.com/HarperDB-Add-Ons). \ No newline at end of file
+Check out our always-expanding library of templates in our open-source [Harper-Add-Ons GitHub repo](https://github.com/HarperDB-Add-Ons).
diff --git a/docs/deployments/configuration.md b/docs/deployments/configuration.md
index 9906e1be..938e0431 100644
--- a/docs/deployments/configuration.md
+++ b/docs/deployments/configuration.md
@@ -4,7 +4,7 @@ Harper is configured through a [YAML](https://yaml.org/) file called `harperdb-c

Some configuration will be populated by default in the config file on install, regardless of whether it is used.
-*** +--- ## Using the Configuration File and Naming Conventions @@ -25,19 +25,22 @@ logging: ``` You could apply this change using: -* Environment variable: `LOGGING_ROTATION_ENABLED=false` -* Command line variable: `--LOGGING_ROTATION_ENABLED false` -* Operations API (`set_configuration`): `logging_rotation_enabled: false` + +- Environment variable: `LOGGING_ROTATION_ENABLED=false` +- Command line variable: `--LOGGING_ROTATION_ENABLED false` +- Operations API (`set_configuration`): `logging_rotation_enabled: false` To change the `port` in the `http` section, use: -* Environment variable: `HTTP_PORT=` -* Command line variable: `--HTTP_PORT ` -* Operations API (`set_configuration`): `http_port: ` + +- Environment variable: `HTTP_PORT=` +- Command line variable: `--HTTP_PORT ` +- Operations API (`set_configuration`): `http_port: ` To set the `operationsApi.network.port` to `9925`, use: -* Environment variable: `OPERATIONSAPI_NETWORK_PORT=9925` -* Command line variable: `--OPERATIONSAPI_NETWORK_PORT 9925` -* Operations API (`set_configuration`): `operationsApi_network_port: 9925` + +- Environment variable: `OPERATIONSAPI_NETWORK_PORT=9925` +- Command line variable: `--OPERATIONSAPI_NETWORK_PORT 9925` +- Operations API (`set_configuration`): `operationsApi_network_port: 9925` _Note: Component configuration cannot be added or updated via CLI or ENV variables._ @@ -47,7 +50,7 @@ To use a custom configuration file to set values on install, use the CLI/ENV var To install Harper overtop of an existing configuration file, set `HDB_CONFIG` to the root path of your install `/harperdb-config.yaml` -*** +--- ## Configuration Options @@ -70,7 +73,7 @@ For HTTP clients that support (Brotli) compression encoding, responses that are ```yaml http: - compressionThreshold: 1200 + compressionThreshold: 1200 ``` `cors` - _Type_: boolean; _Default_: true @@ -105,7 +108,7 @@ The port used to access the component server. The port the Harper component server uses for HTTPS connections. This requires a valid certificate and key. -`http2` - _Type_: boolean; _Default_: false +`http2` - _Type_: boolean; _Default_: false Enables HTTP/2 for the HTTP server. @@ -115,16 +118,16 @@ The length of time in milliseconds after which a request will timeout. ```yaml http: - cors: true - corsAccessList: - - null - headersTimeout: 60000 - maxHeaderSize: 8192 - https: false - keepAliveTimeout: 30000 - port: 9926 - securePort: null - timeout: 120000 + cors: true + corsAccessList: + - null + headersTimeout: 60000 + maxHeaderSize: 8192 + https: false + keepAliveTimeout: 30000 + port: 9926 + securePort: null + timeout: 120000 ``` `mlts` - _Type_: boolean | object; _Default_: false @@ -155,7 +158,7 @@ http: user: user-name ``` -*** +--- ### `threads` @@ -191,7 +194,7 @@ threads: This specifies the heap memory limit for each thread, in megabytes. The default heap limit is a heuristic based on available memory and thread count. -*** +--- ### `replication` @@ -201,7 +204,7 @@ The `replication` section configures [Harper replication](../developers/replicat replication: hostname: server-one url: wss://server-one:9925 - databases: "*" + databases: '*' routes: - wss://server-two:9925 port: null @@ -223,7 +226,7 @@ Configure which databases to replicate. This can be a string for all database or ```yaml replication: - databases: + databases: - db1 - db2 ``` @@ -254,7 +257,7 @@ replication: - QA69C7E2S ``` -`port` - _Type_: integer; +`port` - _Type_: integer; The port to use for replication connections. 
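As a sketch of how a setting like this could be changed at runtime, the flattened naming convention described earlier makes it reachable through `set_configuration` (the port number below is purely illustrative, and as with other configuration changes, expect to restart Harper for it to take effect):

```json
{
	"operation": "set_configuration",
	"replication_port": 9930
}
```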
@@ -274,7 +277,7 @@ Replication will first attempt to catch up using the audit log. If unsuccessful, This defines the shard id of this instance and is used in conjunction with the [Table Resource functions](../developers/replication/sharding#custom-sharding) `setResidency` & `setResidencyById` to programmatically route traffic to the proper shard. -*** +--- ### `clustering` using NATS @@ -284,11 +287,11 @@ _Note: There exist two ways to create clusters and replicate data in Harper. One Clustering offers a lot of different configurations, however in a majority of cases the only options you will need to pay attention to are: -* `clustering.enabled` Enable the clustering processes. -* `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes. -* `clustering.hubServer.cluster.network.routes`The connections to other instances. -* `clustering.nodeName` The name of your node, must be unique within the cluster. -* `clustering.user` The name of the user credentials used for Inter-node authentication. +- `clustering.enabled` Enable the clustering processes. +- `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes. +- `clustering.hubServer.cluster.network.routes`The connections to other instances. +- `clustering.nodeName` The name of your node, must be unique within the cluster. +- `clustering.user` The name of the user credentials used for Inter-node authentication. `enabled` - _Type_: boolean; _Default_: false @@ -298,7 +301,7 @@ _Note: If you enabled clustering but do not create and add a cluster user you wi ```yaml clustering: - enabled: true + enabled: true ``` `clustering.hubServer.cluster` @@ -437,7 +440,7 @@ The maximum number of messages a consumer can process in one go. The number of Harper threads that are delegated to ingesting messages. -*** +--- `logLevel` - _Type_: string; _Default_: error @@ -499,7 +502,7 @@ When true, all transactions that are received from other nodes are republished t When true, hub server will verify client certificate using the CA certificate. -*** +--- `user` - _Type_: string; _Default_: null @@ -511,10 +514,10 @@ The user can be created either through the API using an `add_user` request with ```yaml clustering: - user: cluster_person + user: cluster_person ``` -*** +--- ### `localStudio` @@ -529,7 +532,7 @@ localStudio: enabled: false ``` -*** +--- ### `logging` @@ -550,9 +553,9 @@ To access the audit logs, use the API operation `read_audit_log`. It will provid ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog" + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" } ``` @@ -848,7 +851,7 @@ Path to the certificate authority file. Path to the private key file. -*** +--- ### `componentsRoot` @@ -860,7 +863,7 @@ The path to the folder containing the local component files. componentsRoot: ~/hdb/components ``` -*** +--- ### `rootPath` @@ -872,7 +875,7 @@ The Harper database and applications/API/interface are decoupled from each other rootPath: /Users/jonsnow/hdb ``` -*** +--- ### `storage` @@ -967,6 +970,7 @@ The `path` configuration sets where all database files should reside. storage: path: /users/harperdb/storage ``` + _**Note:**_ This configuration applies to all database files, which includes system tables that are used internally by Harper. For this reason if you wish to use a non default `path` value you must move any existing schemas into your `path` location. 
Existing schemas is likely to include the system schema which can be found at `/schema/system`. `blobPaths` - _Type_: string; _Default_: `/blobs` @@ -1000,7 +1004,7 @@ storage: evictionFactor: 100000 # A factor used to determine how much aggressively to evict cached entries (default) ``` -*** +--- ### `tls` @@ -1036,16 +1040,15 @@ tls: - certificate: ~/hdb/keys/certificate1.pem certificateAuthority: ~/hdb/keys/ca1.pem privateKey: ~/hdb/keys/privateKey1.pem - host: example.com # the host is optional, and if not provided, this certificate's common name will be used as the host name. + host: example.com # the host is optional, and if not provided, this certificate's common name will be used as the host name. - certificate: ~/hdb/keys/certificate2.pem certificateAuthority: ~/hdb/keys/ca2.pem privateKey: ~/hdb/keys/privateKey2.pem - ``` Note that a `tls` section can also be defined in the `operationsApi` section, which will override the root `tls` section for the operations API. -*** +--- ### `mqtt` @@ -1105,7 +1108,7 @@ mqtt: required: true ``` -*** +--- ### `databases` @@ -1157,20 +1160,22 @@ Using the API: ```json { - "operation": "set_configuration", - "databases": [{ - "nameOfDatabase": { - "tables": { - "nameOfTable": { - "path": "/path/to/table" - } - } - } - }] + "operation": "set_configuration", + "databases": [ + { + "nameOfDatabase": { + "tables": { + "nameOfTable": { + "path": "/path/to/table" + } + } + } + } + ] } ``` -*** +--- ### Components diff --git a/docs/deployments/harper-cli.md b/docs/deployments/harper-cli.md index 69edc913..eb707c74 100644 --- a/docs/deployments/harper-cli.md +++ b/docs/deployments/harper-cli.md @@ -33,7 +33,7 @@ harperdb install harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925 ``` -*** +--- ### Starting Harper @@ -43,7 +43,7 @@ To start Harper after it is installed, run the following command: harperdb start ``` -*** +--- ### Stopping Harper @@ -53,7 +53,7 @@ To stop Harper once it is running, run the following command: harperdb stop ``` -*** +--- ### Restarting Harper @@ -63,7 +63,7 @@ To restart Harper once it is running, run the following command: harperdb restart ``` -*** +--- ### Getting the Harper Version @@ -73,7 +73,7 @@ To check the version of Harper that is installed run the following command: harperdb version ``` -*** +--- ### Renew self-signed certificates @@ -83,7 +83,7 @@ To renew the Harper generated self-signed certificates, run: harperdb renew-certs ``` -*** +--- ### Copy a database with compaction @@ -99,7 +99,7 @@ For example, to copy the default database: harperdb copy-db data /home/user/hdb/database/copy.mdb ``` -*** +--- ### Get all available CLI commands @@ -109,7 +109,7 @@ To display all available Harper CLI commands along with a brief description run: harperdb help ``` -*** +--- ### Get the status of Harper and clustering @@ -119,13 +119,13 @@ To display the status of the Harper process, the clustering hub and leaf process harperdb status ``` -*** +--- ### Backups Harper uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that Harper maintains database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a Harper database. Database files are stored in the hdb/database directory. 
As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the database directory to restore a previous backup (with Harper shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot, and this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use.

-***
+---

## Operations API through the CLI

diff --git a/docs/deployments/harper-cloud/alarms.md b/docs/deployments/harper-cloud/alarms.md
index 888cc8af..d9c5c08c 100644
--- a/docs/deployments/harper-cloud/alarms.md
+++ b/docs/deployments/harper-cloud/alarms.md
@@ -4,10 +4,10 @@ Harper Cloud instance alarms are triggered when certain conditions are met. Once

### Heading Definitions

-* **Alarm**: Title of the alarm.
-* **Threshold**: Definition of the alarm threshold.
-* **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over.
-* **Proposed Remedy**: Recommended solution to avoid the alert in the future.
+- **Alarm**: Title of the alarm.
+- **Threshold**: Definition of the alarm threshold.
+- **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over.
+- **Proposed Remedy**: Recommended solution to avoid the alert in the future.

| Alarm   | Threshold  | Intervals | Proposed Remedy                                                                                                                  |
| ------- | ---------- | --------- | ------------------------------------------------------------------------------------------------------------------------------ |
diff --git a/docs/deployments/harper-cloud/iops-impact.md b/docs/deployments/harper-cloud/iops-impact.md
index c91cd9e1..b74edd33 100644
--- a/docs/deployments/harper-cloud/iops-impact.md
+++ b/docs/deployments/harper-cloud/iops-impact.md
@@ -24,15 +24,18 @@ For assistance in estimating IOPS requirements feel free to contact Harper Suppo

## Example Use Case IOPS Requirements

-* **Sensor Data Collection**
+- **Sensor Data Collection**

-   In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level. For example, if you are collecting 100 records per second you would expect to need roughly 3,000 IOPS just to handle the data inserts.
-* **Data Analytics/BI Server**
+
+  In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level. For example, if you are collecting 100 records per second you would expect to need roughly 3,000 IOPS just to handle the data inserts (see the back-of-envelope sketch after this list).

-   Providing a server for analytics purposes typically requires a larger machine. Typically these cases involve large scale SQL joins and aggregations, which puts a large strain on reads. Harper utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs to have high IOPS to meet performance demand.
-* **Web Services** +- **Data Analytics/BI Server** - Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any Harper operation that requires a data scan will be IOPS intensive, but if these are not frequent then the EBS boost will suffice. Queries utilizing equals operations in either SQL or NoSQL do not require a scan due to Harper’s native indexing. -* **High Performance Database** + Providing a server for analytics purposes typically requires a larger machine. Typically these cases involve large scale SQL joins and aggregations, which puts a large strain on reads. Harper utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs to have high IOPS to meet performance demand. - Ultimately, if performance is your top priority, Harper should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements. +- **Web Services** + + Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any Harper operation that requires a data scan will be IOPS intensive, but if these are not frequent then the EBS boost will suffice. Queries utilizing equals operations in either SQL or NoSQL do not require a scan due to Harper’s native indexing. + +- **High Performance Database** + + Ultimately, if performance is your top priority, Harper should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements. diff --git a/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md b/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md index d9ab83c6..b3caa3df 100644 --- a/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md +++ b/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md @@ -24,4 +24,4 @@ AWS EBS gp2 volumes have a baseline performance level, which determines the numb Smaller gp2 volumes are perfect for trying out the functionality of Harper, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger Harper volume. Learn more about the [impact of IOPS on performance here](iops-impact.md). -You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#EBSVolumeTypes\_gp2). +You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#EBSVolumeTypes_gp2). 
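Working backward from the sensor-data example above, 100 records per second mapping to roughly 3,000 IOPS implies a multiplier of about 30 I/O operations per inserted record. A back-of-envelope sketch for sizing, treating that multiplier as an assumption that will vary with record size and indexing:

```javascript
// Back-of-envelope IOPS estimate for sustained ingest.
// ioPerInsert (~30) is an assumed multiplier implied by the
// "100 records per second needs roughly 3,000 IOPS" example above;
// the real cost varies with record size and the number of indexed attributes.
const recordsPerSecond = 100;
const ioPerInsert = 30;
const estimatedIops = recordsPerSecond * ioPerInsert;
console.log(`Estimated sustained IOPS: ${estimatedIops}`); // 3000
```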
diff --git a/docs/deployments/install-harper/README.md b/docs/deployments/install-harper/README.md
index bfb9d3c5..1305b182 100644
--- a/docs/deployments/install-harper/README.md
+++ b/docs/deployments/install-harper/README.md
@@ -51,7 +51,7 @@ npm install -g harperdb-X.X.X.tgz harperdb install

 Harper comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including:

-* [Go](https://go.dev/dl/): version 1.19.1
-* GCC
-* Make
-* Python v3.7, v3.8, v3.9, or v3.10
+- [Go](https://go.dev/dl/): version 1.19.1
+- GCC
+- Make
+- Python v3.7, v3.8, v3.9, or v3.10
diff --git a/docs/deployments/install-harper/linux.md b/docs/deployments/install-harper/linux.md
index 0f14bdca..365989aa 100644
--- a/docs/deployments/install-harper/linux.md
+++ b/docs/deployments/install-harper/linux.md
@@ -4,7 +4,7 @@ If you wish to install locally or already have a configured server, see the basi

 The following is a recommended way to configure Linux and install Harper. These instructions should work reasonably well for any public cloud or on-premises Linux instance.

-***
+---

 These instructions assume that the following has already been completed:

@@ -18,7 +18,7 @@ While you will need to access Harper through port 9925 for the administration th

 For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 Instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account.

-***
+---

 ### (Optional) LVM Configuration

@@ -81,7 +81,7 @@ Run `lsblk` and note the device name of the additional volume
 lsblk
 ```

-Create an ext4 filesystem on the volume (The below commands assume the device name is nvme1n1. If you used LVM to create logical volume, replace /dev/nvme1n1 with /dev/hdb\_vg/hdb\_lv)
+Create an ext4 filesystem on the volume (The below commands assume the device name is nvme1n1. If you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb_vg/hdb_lv)

 ```bash
 sudo mkfs.ext4 -L hdb_data /dev/nvme1n1
diff --git a/docs/deployments/upgrade-hdb-instance.md b/docs/deployments/upgrade-hdb-instance.md
index 9b61070b..cde1a11d 100644
--- a/docs/deployments/upgrade-hdb-instance.md
+++ b/docs/deployments/upgrade-hdb-instance.md
@@ -9,6 +9,7 @@ Upgrading Harper is a two-step process. First the latest version of Harper must

 1. Install the latest version of Harper using `npm install -g harperdb`. Note `-g` should only be used if you installed Harper globally (which is recommended).
+
 2. Run `harperdb` to initiate the upgrade process.

 Harper will then prompt you for all appropriate inputs and then run the upgrade directives.

@@ -85,7 +86,7 @@ Start Harper

 ```
 harperdb start
 ```

-***
+---

 ## Upgrading NATS to Plexus 4.4

@@ -99,8 +100,8 @@ To enable Plexus on a node that is already running NATS, you will need to update

 ```yaml
 replication:
-  url: wss://my-cluster-node-1:9925
-  hostname: node-1
+  url: wss://my-cluster-node-1:9925
+  hostname: node-1
 ```

 `replication.url` – This should be set to the URL of the current Harper instance.

@@ -110,22 +111,22 @@ replication:

 ### Upgrade Steps

 1. Set up the bridge node:
-   * Choose one node to be the bridge node.
-   * On this node, follow the "Enabling Plexus" steps from the previous section, but **do not disable NATS clustering on this instance.**
-   * Stop the instance and perform the upgrade.
-   * Start the instance. This node should now be running both Plexus and NATS.
+   - Choose one node to be the bridge node.
+   - On this node, follow the "Enabling Plexus" steps from the previous section, but **do not disable NATS clustering on this instance.**
+   - Stop the instance and perform the upgrade.
+   - Start the instance. This node should now be running both Plexus and NATS.
 2. Upgrade a node:
-   * Choose a node that needs upgrading and enable Plexus by following the "Enable Plexus" steps.
-   * Disable NATS by setting `clustering.enabled` to `false`.
-   * Stop the instance and upgrade it.
-   * Start the instance.
-   * Call [`add_node`](../developers/operations-api/clustering.md#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node. _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering.md#add-node)_._
+   - Choose a node that needs upgrading and enable Plexus by following the "Enable Plexus" steps.
+   - Disable NATS by setting `clustering.enabled` to `false`.
+   - Stop the instance and upgrade it.
+   - Start the instance.
+   - Call [`add_node`](../developers/operations-api/clustering.md#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node. _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering.md#add-node)_._

 ```json
 {
-	"operation": "add_node",
-	"hostname:": "node-1",
-	"url": "wss://my-cluster-node-1:9925"
+	"operation": "add_node",
+	"hostname": "node-1",
+	"url": "wss://my-cluster-node-1:9925"
 }
 ```

diff --git a/docs/developers/applications/README.md b/docs/developers/applications/README.md
index fe504038..e463ec12 100644
--- a/docs/developers/applications/README.md
+++ b/docs/developers/applications/README.md
@@ -22,46 +22,45 @@ Extensions can also depend on other extensions. 
For example, the [`@harperdb/apo ```mermaid flowchart TD - subgraph Applications - direction TB - NextJSApp["Next.js App"] - ApolloApp["Apollo App"] - CustomResource["Custom Resource"] - end - - subgraph Extensions - direction TB - subgraph Custom - NextjsExt["@harperdb/nextjs"] - ApolloExt["@harperdb/apollo"] + subgraph Applications + direction TB + NextJSApp["Next.js App"] + ApolloApp["Apollo App"] + CustomResource["Custom Resource"] end - subgraph Built-In - GraphqlSchema["graphqlSchema"] - JsResource["jsResource"] - Rest["rest"] - end - end - subgraph Core - direction TB - Database["database"] - FileSystem["file-system"] - Networking["networking"] - end + subgraph Extensions + direction TB + subgraph Custom + NextjsExt["@harperdb/nextjs"] + ApolloExt["@harperdb/apollo"] + end + subgraph Built-In + GraphqlSchema["graphqlSchema"] + JsResource["jsResource"] + Rest["rest"] + end + end - NextJSApp --> NextjsExt - ApolloApp --> ApolloExt - CustomResource --> JsResource & GraphqlSchema & Rest + subgraph Core + direction TB + Database["database"] + FileSystem["file-system"] + Networking["networking"] + end - NextjsExt --> Networking - NextjsExt --> FileSystem - ApolloExt --> GraphqlSchema - ApolloExt --> Networking + NextJSApp --> NextjsExt + ApolloApp --> ApolloExt + CustomResource --> JsResource & GraphqlSchema & Rest - GraphqlSchema --> Database - JsResource --> Database - Rest --> Networking + NextjsExt --> Networking + NextjsExt --> FileSystem + ApolloExt --> GraphqlSchema + ApolloExt --> Networking + GraphqlSchema --> Database + JsResource --> Database + Rest --> Networking ``` > As of Harper v4.6, a new, **experimental** component system has been introduced called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade. Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleComponent` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](../../technical-details/reference/components/plugins.md) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported. 
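
To make the plugin note above slightly more concrete, here is a purely illustrative sketch. Only the single `handleComponent` entry point is named in the note; the export form and the argument shape used here are assumptions for illustration, so consult the plugin API reference linked above for the actual signature.

```javascript
// Illustrative only: a plugin reduced to the single `handleComponent`
// method described above. The export style and the `options` argument
// are assumed placeholders, not documented Harper plugin API.
export function handleComponent(options) {
	// hypothetical reaction to the component being loaded
	console.log('handling component with options:', options);
}
```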
@@ -92,7 +91,7 @@ export class DogWithHumanAge extends Dog {
 		const record = await super.get(target);
 		return {
 			...record, // include all properties from the record
-			humanAge: 15 + record.age * 5 // silly calculation of human age equivalent
+			humanAge: 15 + record.age * 5, // silly calculation of human age equivalent
 		};
 	}
 }
@@ -131,7 +130,7 @@ export class DogWithBreed extends Dog {
 		let breedDescription = await Breed.get(record.breed);
 		return {
 			...record,
-			breedDescription
+			breedDescription,
 		};
 	}
 }
@@ -168,7 +167,7 @@ export class CustomDog extends Dog {
 		// if we want to skip the default permission checks, we can turn off checkPermissions:
 		target.checkPermissions = false;
 		const record = this.update(target);
-		// and do our own/custom permission check: 
+		// and do our own/custom permission check:
 		if (record.owner !== context.user?.username) {
 			throw new Error('Can not update this record');
 		}
@@ -196,7 +195,8 @@ We can also directly implement the Resource class and use it to create new data

 ```javascript
 const { Breed } = tables; // our Breed table
-class BreedSource extends Resource { // define a data source
+class BreedSource extends Resource {
+	// define a data source
 	async get(target) {
 		return (await fetch(`http://best-dog-site.com/${target}`)).json();
 	}
diff --git a/docs/developers/applications/caching.md b/docs/developers/applications/caching.md
index b5835669..2c733583 100644
--- a/docs/developers/applications/caching.md
+++ b/docs/developers/applications/caching.md
@@ -17,9 +17,10 @@ You may also note that we can define a time-to-live (TTL) expiration on the tabl

 While you can provide a single expiration time, there are actually several expiration timings that are potentially relevant, and can be independently configured. These settings are available as directive properties on the table configuration (like `expiration` above): _stale expiration_, the point when a request for a record should trigger a request to origin (but might possibly return the current stale record, depending on policy); _must-revalidate expiration_, the point when a request for a record must make a request to origin first and return the latest value from origin; and _eviction expiration_, the point when a record is actually removed from the caching table. You can provide a single expiration and it defines the behavior for all three. You can also provide three settings for expiration, through table directives:
-* expiration - The amount of time until a record goes stale.
-* eviction - The amount of time after expiration before a record can be evicted (defaults to zero).
-* scanInterval - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction).
+
+- expiration - The amount of time until a record goes stale.
+- eviction - The amount of time after expiration before a record can be evicted (defaults to zero).
+- scanInterval - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction).

 ## Define External Data Source

@@ -50,7 +51,6 @@ flowchart TD
 	Resource-->API(Remote Data Source API)
 ```

-
 Harper handles waiting for an existing cache resolution to finish and uses its result. This prevents a "cache stampede" when entries expire, ensuring that multiple requests to a cache entry will all wait on a single request to the data source.

Cache tables with an expiration are periodically pruned for expired entries.
Because this is done periodically, there is usually some amount of time between when a record has expired and when the record is actually evicted (the cached data is removed). But when a record is checked for availability, the expiration time is used to determine if the record is fresh (and the cache entry can be used).

@@ -106,7 +106,8 @@ One way to provide more active caching is to specifically invalidate individual
 const { MyTable } = tables;
 export class MyTableEndpoint extends MyTable {
 	async post(data) {
-		if (data.invalidate) // use this flag as a marker
+		if (data.invalidate)
+			// use this flag as a marker
 			this.invalidate();
 	}
 }
@@ -139,20 +140,20 @@ class ThirdPartyAPI extends Resource {

 Notification events should always include an `id` property to indicate the primary key of the updated record. The event should have a `value` property for `put` and `message` event types. The `timestamp` is optional and can be used to indicate the exact timestamp of the change. The following event `type`s are supported:

-* `put` - This indicates that the record has been updated and provides the new value of the record.
-* `invalidate` - Alternately, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
-* `delete` - This indicates that the record has been deleted.
-* `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time.md).
-* `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property.
+- `put` - This indicates that the record has been updated and provides the new value of the record.
+- `invalidate` - Alternatively, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
+- `delete` - This indicates that the record has been deleted.
+- `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time.md).
+- `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property.

 And the following properties can be defined on event objects:

-* `type`: The event type as described above.
-* `id`: The primary key of the record that updated
-* `value`: The new value of the record that updated (for put and message)
-* `writes`: An array of event properties that are part of a transaction (used in conjunction with the transaction event type).
-* `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
-* `timestamp`: The timestamp of when the data change occurred
+- `type`: The event type as described above.
+- `id`: The primary key of the record that was updated
+- `value`: The new value of the record that was updated (for put and message)
+- `writes`: An array of event properties that are part of a transaction (used in conjunction with the transaction event type).
+- `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
+- `timestamp`: The timestamp of when the data change occurred

 With an active external data source with a `subscribe` method, the data source will proactively notify the cache, ensuring a fresh and efficient active cache. Note that with an active data source, we still use the `sourcedFrom` method to register the source for a caching table, and the table will automatically detect and call the subscribe method on the data source.

@@ -214,10 +215,10 @@ When you are using a caching table, it is important to remember that any resourc

 ```javascript
 class MyCache extends tables.MyCache {
 	async post(data) {
-		// if the data is not cached locally, retrieves from source:
-		await this.ensuredLoaded();
-		// now we can be sure that the data is loaded, and can access properties
-		this.quantity = this.quantity - data.purchases;
+		// if the data is not cached locally, retrieves from source:
+		await this.ensuredLoaded();
+		// now we can be sure that the data is loaded, and can access properties
+		this.quantity = this.quantity - data.purchases;
 	}
 }
 ```
@@ -264,21 +265,24 @@ Cache-Control: only-if-cached, no-store

 You may also use the `stale-if-error` to indicate if it is acceptable to return a stale cached resource when the data source returns an error (network connection error, 500, 502, 503, or 504). The `must-revalidate` directive can indicate a stale cached resource can not be returned, even when the data source has an error (by default a stale cached resource is returned when there is a network connection error).

-## Caching Flow
+## Caching Flow
+
 It may be helpful to understand the flow of a cache request. When a request is made to a caching table:
-* Harper will first create a resource instance to handle the process, and ensure that the data is loaded for the resource instance. To do this, it will first check if the record is in the table/cache.
-  * If the record is not in the cache, Harper will first check if there is a current request to get the record from the source. If there is, Harper will wait for the request to complete and return the record from the cache.
-  * If not, Harper will call the `get()` method on the source to retrieve the record. The record will then be stored in the cache.
-  * If the record is in the cache, Harper will check if the record is stale. If the record is not stale, Harper will immediately return the record from the cache. If the record is stale, Harper will call the `get()` method on the source to retrieve the record.
-  * The record will then be stored in the cache. This will write the record to the cache in a separate asynchronous/background write-behind transaction, so it does not block the current request, then return the data immediately once it has it.
-* The `get()` method will be called on the resource instance to return the record to the client (or perform any querying on the record). If this is overriden, the method will be called at this time.
+
+- Harper will first create a resource instance to handle the process, and ensure that the data is loaded for the resource instance. To do this, it will first check if the record is in the table/cache.
+  - If the record is not in the cache, Harper will first check if there is a current request to get the record from the source. If there is, Harper will wait for the request to complete and return the record from the cache.
+    - If not, Harper will call the `get()` method on the source to retrieve the record. The record will then be stored in the cache.
+  - If the record is in the cache, Harper will check if the record is stale. If the record is not stale, Harper will immediately return the record from the cache. If the record is stale, Harper will call the `get()` method on the source to retrieve the record.
+    - The record will then be stored in the cache. This will write the record to the cache in a separate asynchronous/background write-behind transaction, so it does not block the current request, then return the data immediately once it has it.
+- The `get()` method will be called on the resource instance to return the record to the client (or perform any querying on the record). If this is overridden, the method will be called at this time.

### Caching Flow with Write-Through
+
When writes are performed on a caching table (in the `put()` or `post()` method, for example), the flow is slightly different:
-* Harper will have first created a resource instance to handle the process, and this resource instance that will be the current `this` for a call to `put()` or `post()`.
-* If a `put()` or `update()` is called, for example, this action will be record in the current transaction.
-* Once the transaction is committed (which is done automatically as the request handler completes), the transaction write will be sent to the source to update the data.
-  * The local writes will wait for the source to confirm the writes have completed (note that this effectively allows you to perform a two-phase transactional write to the source, and the source can confirm the writes have completed before the transaction is committed locally).
-  * The transaction writes will then be written the local caching table.
-* The transaction handler will wait for the local commit to be written, then the transaction will be resolved and a response will be sent to the client.
+
+- Harper will have first created a resource instance to handle the process, and this resource instance will be the current `this` for a call to `put()` or `post()`.
+- If a `put()` or `update()` is called, for example, this action will be recorded in the current transaction.
+- Once the transaction is committed (which is done automatically as the request handler completes), the transaction write will be sent to the source to update the data.
+  - The local writes will wait for the source to confirm the writes have completed (note that this effectively allows you to perform a two-phase transactional write to the source, and the source can confirm the writes have completed before the transaction is committed locally).
+  - The transaction writes will then be written to the local caching table.
+- The transaction handler will wait for the local commit to be written, then the transaction will be resolved and a response will be sent to the client.
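
As a sketch of the two flows described above, the following combines a passive `get()` with a write-through `put()` on an external data source, registered with the `sourcedFrom` method referenced earlier. The table name and origin URL are placeholders, and treating the source's `put()` as the write-through hook is an assumption for illustration.

```javascript
// A sketch of the caching flows above: cache misses call get() on the source,
// and writes flow through put(), which origin confirms before the local
// commit resolves. Names and URLs here are placeholders, not documented API.
const { MyCache } = tables;

class UpstreamAPI extends Resource {
	async get(target) {
		// cache miss or stale record: fetch the latest value from origin
		return (await fetch(`https://origin.example.com/items/${target}`)).json();
	}
	async put(record) {
		// write-through: await origin's confirmation before committing locally
		await fetch(`https://origin.example.com/items/${record.id}`, {
			method: 'PUT',
			body: JSON.stringify(record),
		});
	}
}
// register the source for the caching table, as described above
MyCache.sourcedFrom(UpstreamAPI);
```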
diff --git a/docs/developers/applications/debugging.md b/docs/developers/applications/debugging.md
index 36be49c1..d407e0a3 100644
--- a/docs/developers/applications/debugging.md
+++ b/docs/developers/applications/debugging.md
@@ -14,13 +14,13 @@ For local debugging and development, it is recommended that you use standard con

 Harper Logger Functions

-* `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
-* `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
-* `info(message)`: Write a 'info' level log, if the configured level allows for it.
-* `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
-* `error(message)`: Write a 'error' level log, if the configured level allows for it.
-* `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
-* `notify(message)`: Write a 'notify' level log.
+- `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
+- `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
+- `info(message)`: Write an 'info' level log, if the configured level allows for it.
+- `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
+- `error(message)`: Write an 'error' level log, if the configured level allows for it.
+- `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
+- `notify(message)`: Write a 'notify' level log.

 For example, you can log a warning:

diff --git a/docs/developers/applications/define-routes.md b/docs/developers/applications/define-routes.md
index 817e9880..454755b6 100644
--- a/docs/developers/applications/define-routes.md
+++ b/docs/developers/applications/define-routes.md
@@ -12,23 +12,23 @@ fastifyRoutes: # This loads files that define fastify routes using fastify's aut

 By default, route URLs are configured to be:

-* \[**Instance URL**]:\[**HTTP Port**]/\[**Project Name**]/\[**Route URL**]
+- \[**Instance URL**]:\[**HTTP Port**]/\[**Project Name**]/\[**Route URL**]

 However, you can specify the path to be `/` if you wish to have your routes handling the root path of incoming URLs.

-* The route below, using the default config, within the **dogs** project, with a route of **breeds** would be available at **http://localhost:9926/dogs/breeds**.
+- The route below, using the default config, within the **dogs** project, with a route of **breeds** would be available at **http://localhost:9926/dogs/breeds**.

 In effect, this route is just a pass-through to Harper. The same result could have been achieved by hitting the core Harper API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.
```javascript export default async (server, { hdbCore, logger }) => { - server.route({ - url: '/', - method: 'POST', - preValidation: hdbCore.preValidation, - handler: hdbCore.request, - }) -} + server.route({ + url: '/', + method: 'POST', + preValidation: hdbCore.preValidation, + handler: hdbCore.request, + }); +}; ``` ## Custom Handlers @@ -65,20 +65,20 @@ Below is an example of a route that uses a custom validation hook: import customValidation from '../helpers/customValidation'; export default async (server, { hdbCore, logger }) => { - server.route({ - url: '/:id', - method: 'GET', - preValidation: (request) => customValidation(request, logger), - handler: (request) => { - request.body= { - operation: 'sql', - sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}` - }; - - return hdbCore.requestWithoutAuthentication(request); - } - }); -} + server.route({ + url: '/:id', + method: 'GET', + preValidation: (request) => customValidation(request, logger), + handler: (request) => { + request.body = { + operation: 'sql', + sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`, + }; + + return hdbCore.requestWithoutAuthentication(request); + }, + }); +}; ``` Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](define-routes.md#helper-methods). @@ -91,24 +91,25 @@ When declaring routes, you are given access to 2 helper methods: hdbCore and log hdbCore contains three functions that allow you to authenticate an inbound request, and execute operations against Harper directly, by passing the standard Operations API. -* **preValidation** +- **preValidation** + + This is an array of functions used for fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard Harper Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**. - This is an array of functions used for fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard Harper Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**. -* **request** +- **request** - This will execute a request with Harper using the operations API. The `request.body` should contain a standard Harper operation and must also include the `hdb_user` property that was in `request.body` provided in the callback. -* **requestWithoutAuthentication** + This will execute a request with Harper using the operations API. The `request.body` should contain a standard Harper operation and must also include the `hdb_user` property that was in `request.body` provided in the callback. - Executes a request against Harper without any security checks around whether the inbound user is allowed to make this request. For security purposes, you should always take the following precautions when using this method: +- **requestWithoutAuthentication** - * Properly handle user-submitted values, including url params. 
User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.

+  Executes a request against Harper without any security checks around whether the inbound user is allowed to make this request. For security purposes, you should always take the following precautions when using this method:
+  - Properly handle user-submitted values, including url params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.

 **logger**

 This helper allows you to write directly to the log file, hdb.log. It’s useful for debugging during development, although you may also use the console logger. There are 5 functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.

-* logger.trace(‘Starting the handler for /dogs’)
-* logger.debug(‘This should only fire once’)
-* logger.warn(‘This should never ever fire’)
-* logger.error(‘This did not go well’)
-* logger.fatal(‘This did not go very well at all’)
+- logger.trace(‘Starting the handler for /dogs’)
+- logger.debug(‘This should only fire once’)
+- logger.warn(‘This should never ever fire’)
+- logger.error(‘This did not go well’)
+- logger.fatal(‘This did not go very well at all’)
diff --git a/docs/developers/applications/defining-roles.md b/docs/developers/applications/defining-roles.md
index b7faa4be..075c02eb 100644
--- a/docs/developers/applications/defining-roles.md
+++ b/docs/developers/applications/defining-roles.md
@@ -4,7 +4,9 @@ In addition to [defining a database schema](./defining-schemas.md), you can also
 roles:
   files: roles.yaml
 ```
+
 Now you can create a roles.yaml in your application directory:
+
 ```yaml
 declared-role:
   super_user: false # This is a boolean value that indicates if the role is a super user or not
@@ -28,12 +30,13 @@ declared-role:

 With this in place, when Harper starts up, it will create the roles in the roles.yaml file if they do not already exist. If they do exist, it will update the roles with the new permissions. This allows you to manage your roles in your application code and have them automatically created or updated when the application starts.
The structure of the roles.yaml file is: + ```yaml : permission: # contains the permissions for the role, this structure is optional, and you can place flags like super_user here as a shortcut super_user: : # each database with permissions can be added as named properties on the role - tables: # this structure is optional, and table names can be placed directly under the database as a shortcut + tables: # this structure is optional, and table names can be placed directly under the database as a shortcut : read: # indicates if the role has read permission to this table insert: # indicates if the role has insert permission to this table @@ -44,4 +47,4 @@ The structure of the roles.yaml file is: read: insert: update: -``` \ No newline at end of file +``` diff --git a/docs/developers/applications/defining-schemas.md b/docs/developers/applications/defining-schemas.md index 65fd2714..dae7d705 100644 --- a/docs/developers/applications/defining-schemas.md +++ b/docs/developers/applications/defining-schemas.md @@ -36,10 +36,10 @@ type TableName @table By default the table name is inherited from the type name (in this case the table name would be "TableName"). The `@table` directive supports several optional arguments (all of these are optional and can be freely combined): -* `@table(table: "table_name")` - This allows you to explicitly specify the table name. -* `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database. -* `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds. -* `@table(audit: true)` - This enables the audit log for the table so that a history of record changes are recorded. This defaults to [configuration file's setting for `auditLog`](../../deployments/configuration.md#logging). +- `@table(table: "table_name")` - This allows you to explicitly specify the table name. +- `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database. +- `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds. +- `@table(audit: true)` - This enables the audit log for the table so that a history of record changes are recorded. This defaults to [configuration file's setting for `auditLog`](../../deployments/configuration.md#logging). Database naming: the default "data" database is generally a good default choice for tables in applications that will not be reused in other applications (and don't need to worry about staying in a separate namespace). Application with many tables may wish to organize the tables into separate databases (but remember that transactions do not preserve atomicity across different databases, only across tables in the same database). For components that are designed for re-use, it is recommended that you use a database name that is specific to the component (e.g. "my-component-data") to avoid name collisions with other components. 
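
As an illustration of how the optional `@table` arguments above can be combined (the type, table, and database names below are placeholders invented for the example):

```graphql
# Illustrative sketch only: combining several of the optional @table
# arguments listed above; all names here are invented for the example.
type DogRecord @table(table: "dog", database: "kennel-data", audit: true) {
	id: ID @primaryKey
	name: String
}
```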
@@ -132,7 +132,7 @@ type Product @table { ```javascript tables.Product.setComputedAttribute('totalPrice', (record) => { - return record.price + (record.price * record.taxRate); + return record.price + record.price * record.taxRate; }); ``` @@ -167,7 +167,7 @@ The field directives can be used for information about each attribute in table t #### `@primaryKey` -The `@primaryKey` directive specifies that an attribute is the primary key for a table. These must be unique and when records are created, this will be auto-generated if no primary key is provided. When a primary key is auto-generated, it will be a UUID (as a string) if the primary key type is `String` or `ID`. If the primary key type is `Int`, `Long`, or `Any`, then the primary key will be an auto-incremented number. Using numeric primary keys is more efficient than using UUIDs. Note that if the type is `Int`, the primary key will be limited to 32-bit, which can be limiting and problematic for large tables. It is recommended that if you will be relying on auto-generated keys, that you use a primary key type of `Long` or `Any` (the latter will allow you to also use strings as primary keys). +The `@primaryKey` directive specifies that an attribute is the primary key for a table. These must be unique and when records are created, this will be auto-generated if no primary key is provided. When a primary key is auto-generated, it will be a UUID (as a string) if the primary key type is `String` or `ID`. If the primary key type is `Int`, `Long`, or `Any`, then the primary key will be an auto-incremented number. Using numeric primary keys is more efficient than using UUIDs. Note that if the type is `Int`, the primary key will be limited to 32-bit, which can be limiting and problematic for large tables. It is recommended that if you will be relying on auto-generated keys, that you use a primary key type of `Long` or `Any` (the latter will allow you to also use strings as primary keys). #### `@indexed` @@ -178,6 +178,7 @@ A standard index will index the values in each field, so you can query directly #### Vector Indexing The `@indexed` directive can also specify a `type`. To use vector indexing, you can specify the `type` as `HNSW` for Hierarchical Navigable Small World indexing. This will create a vector index for the attribute. For example: + ```graphql type Product @table { id: Long @primaryKey @@ -186,34 +187,39 @@ type Product @table { ``` HNSW indexing finds the nearest neighbors to a search vector. To use this, you can query with a `sort` parameter, for example: + ```javascript let results = Product.search({ - sort: { attribute: 'textEmbeddings', target: searchVector }, - limit: 5 // get the five nearest neighbors -}) + sort: { attribute: 'textEmbeddings', target: searchVector }, + limit: 5, // get the five nearest neighbors +}); ``` + This can be used in combination with other conditions as well, for example: + ```javascript let results = Product.search({ - conditions: [{ attribute: 'price', comparator: 'lt', value: 50 }], - sort: { attribute: 'textEmbeddings', target: searchVector }, - limit: 5 // get the five nearest neighbors -}) + conditions: [{ attribute: 'price', comparator: 'lt', value: 50 }], + sort: { attribute: 'textEmbeddings', target: searchVector }, + limit: 5, // get the five nearest neighbors +}); ``` HNSW supports several additional arguments to the `@indexed` directive to adjust the HNSW parameters: -* `distance` - Define the distance function. 
This can be set to 'euclidean' or 'cosine' (uses negative of cosine similarity). The default is cosine. -* `efConstruction` - Maximum number of nodes to keep in the list for finding nearest neighbors. A higher value can yield better recall, and a lower value can have better performance. If `efSearchConstruction` is set, this is only applied to indexing. The default is 100. -* `M` - The preferred number of connections at each layer in the HNSW graph. A higher number uses more space but can be helpful when the intrinsic dimensionality of the data is higher. A lower number can be more efficient. The default is 16. -* `optimizeRouting` - This uses a heuristic to avoid graph connections that match existing indirect connections (connections through another node). This can yield more efficient graph traversals for the same M setting. This is a number between 0 and 1 and a higher value will more aggressively omit connections with alternate paths. Setting this to 0 will disable route optimizing and follow the traditional HNSW algorithm for creating connections. The default is 0.5. -* `mL` - The normalization factor for level generation, by default this is computed from `M`. -* `efSearchConstruction` - Maximum number of nodes to keep in the list for finding nearest neighbors for searching. The default is 50. - + +- `distance` - Define the distance function. This can be set to 'euclidean' or 'cosine' (uses negative of cosine similarity). The default is cosine. +- `efConstruction` - Maximum number of nodes to keep in the list for finding nearest neighbors. A higher value can yield better recall, and a lower value can have better performance. If `efSearchConstruction` is set, this is only applied to indexing. The default is 100. +- `M` - The preferred number of connections at each layer in the HNSW graph. A higher number uses more space but can be helpful when the intrinsic dimensionality of the data is higher. A lower number can be more efficient. The default is 16. +- `optimizeRouting` - This uses a heuristic to avoid graph connections that match existing indirect connections (connections through another node). This can yield more efficient graph traversals for the same M setting. This is a number between 0 and 1 and a higher value will more aggressively omit connections with alternate paths. Setting this to 0 will disable route optimizing and follow the traditional HNSW algorithm for creating connections. The default is 0.5. +- `mL` - The normalization factor for level generation, by default this is computed from `M`. +- `efSearchConstruction` - Maximum number of nodes to keep in the list for finding nearest neighbors for searching. The default is 50. 
+
 For example:
+
 ```graphql
 type Product @table {
-	id: Long @primaryKey
-	textEmbeddings: [Float] @indexed(type: "HNSW", distance: "euclidean", optimizeRouting: 0, efSearchConstruction: 100)
+	id: Long @primaryKey
+	textEmbeddings: [Float] @indexed(type: "HNSW", distance: "euclidean", optimizeRouting: 0, efSearchConstruction: 100)
 }
 ```

@@ -237,17 +243,17 @@ If you do not define a schema for a table and create a table through the operati

 Harper supports the following field types in addition to user defined (object) types:

-* `String`: String/text
-* `Int`: A 32-bit signed integer (from -2147483648 to 2147483647)
-* `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992)
-* `Float`: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision\_floating-point\_format). Note that all numbers are stored in the most compact representation available)
-* `BigInt`: Any integer (negative or positive) with less than 300 digits (Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately)
-* `Boolean`: true or false
-* `ID`: A string (but indicates it is not intended to be human readable)
-* `Any`: Any primitive, object, or array is allowed
-* `Date`: A Date object
-* `Bytes`: Binary data as a Buffer or Uint8Array
-* `Blob`: Binary data as a [Blob](../../technical-details/reference/blob.md), designed for large blocks of data that can be streamed. It is recommend that you use this for binary data that will typically be larger than 20KB.
+- `String`: String/text
+- `Int`: A 32-bit signed integer (from -2147483648 to 2147483647)
+- `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992)
+- `Float`: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). Note that all numbers are stored in the most compact representation available)
+- `BigInt`: Any integer (negative or positive) with less than 300 digits (Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately)
+- `Boolean`: true or false
+- `ID`: A string (but indicates it is not intended to be human readable)
+- `Any`: Any primitive, object, or array is allowed
+- `Date`: A Date object
+- `Bytes`: Binary data as a Buffer or Uint8Array
+- `Blob`: Binary data as a [Blob](../../technical-details/reference/blob.md), designed for large blocks of data that can be streamed. It is recommended that you use this for binary data that will typically be larger than 20KB.
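
As a quick illustration pulling several of these types together (the type and field names below are invented for the example):

```graphql
# Illustrative sketch only: a table combining several of the field types
# listed above; the type and field names are placeholders.
type SensorReading @table {
	id: Long @primaryKey
	label: String
	active: Boolean
	recordedAt: Date
	payload: Bytes
	reading: Float
}
```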
#### Renaming Tables diff --git a/docs/developers/applications/example-projects.md b/docs/developers/applications/example-projects.md index f0ece219..7e99e459 100644 --- a/docs/developers/applications/example-projects.md +++ b/docs/developers/applications/example-projects.md @@ -2,32 +2,32 @@ **Library of example Harper applications and components:** -* [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang +- [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang -* [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams +- [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams -* [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos +- [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos -* [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock +- [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock -* [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush +- [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush -* [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden +- [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden -* [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft +- [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft -* [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber +- [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber -* [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty +- [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty -* [How I used Harper Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj +- [How I used Harper Custom Functions to Build a Web App for my 
Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj -* [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary +- [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary -* [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi +- [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi -* [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp +- [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp -* [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David +- [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David -* [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select* Podcast \ No newline at end of file +- [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select\* Podcast diff --git a/docs/developers/applications/web-applications.md b/docs/developers/applications/web-applications.md index c2fde73e..f6d7798b 100644 --- a/docs/developers/applications/web-applications.md +++ b/docs/developers/applications/web-applications.md @@ -56,4 +56,4 @@ Harper includes built-in support for Cross-Origin Resource Sharing (CORS), which Make sure to check out our developer videos too: - [Next.js on Harper | Step-by-Step Guide for Next Level Next.js Performance](https://youtu.be/GqLEwteFJYY) -- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc) \ No newline at end of file +- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc) diff --git a/docs/developers/clustering/README.md b/docs/developers/clustering/README.md index 5cc3ed59..a92ded99 100644 --- a/docs/developers/clustering/README.md +++ b/docs/developers/clustering/README.md @@ -4,24 +4,24 @@ Harper 4.0 - 4.3 used a clustering system based on NATS for replication. In 4.4+ Harper’s clustering engine replicates data between instances of Harper using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted, once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last writer wins model based on recorded transaction time on the transaction and the timestamp on the record on the node. -*** +--- ### Common Use Case A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold: -* The edge application should not be making outbound http requests for security purposes. 
-* There may not be a reliable network connection. -* Not all sensor data will be sent to the cloud--either because of the unreliable network connection, or maybe it’s just a pain to store it. -* The edge node should be inaccessible from outside the firewall. -* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings. +- The edge application should not be making outbound http requests for security purposes. +- There may not be a reliable network connection. +- Not all sensor data will be sent to the cloud--either because of the unreliable network connection, or maybe it’s just a pain to store it. +- The edge node should be inaccessible from outside the firewall. +- The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings. Harper simplifies the architecture of such an application with its bi-directional, table-level replication: -* The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds. -* The application continually pushes sensor data into a “sensor\_data” table via the localhost API, comparing it to the threshold values as it does so. -* When a threshold violation occurs, the application adds a record to the “alerts” table. -* The application appends to that record array “sensor\_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation. -* The edge instance publishes the “alerts” table up to the cloud instance. +- The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds. +- The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so. +- When a threshold violation occurs, the application adds a record to the “alerts” table. +- The application appends to that record array “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation. +- The edge instance publishes the “alerts” table up to the cloud instance. By letting Harper focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to Harper, you reduce the overall exposure of your application to outside forces. diff --git a/docs/developers/clustering/certificate-management.md b/docs/developers/clustering/certificate-management.md index 703e4eca..81f0bc48 100644 --- a/docs/developers/clustering/certificate-management.md +++ b/docs/developers/clustering/certificate-management.md @@ -30,7 +30,7 @@ Since these new certificates can be issued with correct CNs, you should set `ins ### Certificate Requirements -* Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication` as these certificates will be used to accept connections from other Harper nodes and to make requests to other Harper nodes. Example: +- Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication` as these certificates will be used to accept connections from other Harper nodes and to make requests to other Harper nodes. 
Example: ``` X509v3 Key Usage: critical @@ -39,8 +39,8 @@ X509v3 Extended Key Usage: TLS Web Server Authentication, TLS Web Client Authentication ``` -* If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file. -* If your certificates expire you will need a way to issue new certificates to the nodes and then restart Harper. If you are using a public CA such as LetsEncrypt, a tool like `certbot` can be used to renew certificates. +- If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file. +- If your certificates expire you will need a way to issue new certificates to the nodes and then restart Harper. If you are using a public CA such as LetsEncrypt, a tool like `certbot` can be used to renew certificates. ### Certificate Troubleshooting diff --git a/docs/developers/clustering/creating-a-cluster-user.md b/docs/developers/clustering/creating-a-cluster-user.md index babe6150..864989a5 100644 --- a/docs/developers/clustering/creating-a-cluster-user.md +++ b/docs/developers/clustering/creating-a-cluster-user.md @@ -12,11 +12,11 @@ There are multiple ways a `cluster_user` can be created, they are: ```json { - "operation": "add_user", - "role": "cluster_user", - "username": "cluster_account", - "password": "letsCluster123!", - "active": true + "operation": "add_user", + "role": "cluster_user", + "username": "cluster_account", + "password": "letsCluster123!", + "active": true } ``` @@ -26,8 +26,8 @@ This can be done through the API by calling `set_configuration` or by editing th ```json { - "operation": "set_configuration", - "clustering_user": "cluster_account" + "operation": "set_configuration", + "clustering_user": "cluster_account" } ``` diff --git a/docs/developers/clustering/enabling-clustering.md b/docs/developers/clustering/enabling-clustering.md index 27414458..762a9902 100644 --- a/docs/developers/clustering/enabling-clustering.md +++ b/docs/developers/clustering/enabling-clustering.md @@ -19,8 +19,8 @@ _Note: When making any changes to the `harperdb-config.yaml` file Harper must be ```json { - "operation": "set_configuration", - "clustering_enabled": true + "operation": "set_configuration", + "clustering_enabled": true } ``` diff --git a/docs/developers/clustering/establishing-routes.md b/docs/developers/clustering/establishing-routes.md index b2abb43f..8fe628a7 100644 --- a/docs/developers/clustering/establishing-routes.md +++ b/docs/developers/clustering/establishing-routes.md @@ -39,9 +39,9 @@ There are multiple ways to set routes, they are: ```json { - "operation": "cluster_set_routes", - "server": "hub", - "routes":[ {"host": "3.735.184.8", "port": 9932} ] + "operation": "cluster_set_routes", + "server": "hub", + "routes": [{ "host": "3.735.184.8", "port": 9932 }] } ``` @@ -63,7 +63,7 @@ The API also has `cluster_get_routes` for getting all routes in the config and ` ```json { - "operation": "cluster_delete_routes", - "routes":[ {"host": "3.735.184.8", "port": 9932} ] + "operation": "cluster_delete_routes", + "routes": [{ "host": "3.735.184.8", "port": 9932 }] } ``` diff --git a/docs/developers/clustering/managing-subscriptions.md b/docs/developers/clustering/managing-subscriptions.md index bc39c7f7..5d94fb75 100644 --- a/docs/developers/clustering/managing-subscriptions.md +++ b/docs/developers/clustering/managing-subscriptions.md @@ -1,11 +1,13 @@ Tables are replicated when 
the table is designated as replicating and there is a subscription between the nodes. Tables are designated as replicating
by default, but this can be changed by setting `replicate` to `false` in the table definition:
+
 ```graphql
 type Product @table(replicate: false) {
-	id: ID!
-	name: String!
+	id: ID!
+	name: String!
 }
 ```
+
 Or in your harperdb-config.yaml, you can set the default replication behavior for databases, and indicate which databases
 should be replicated by default:

@@ -13,22 +15,20 @@ should be replicated by default:
 replication:
 	databases: data
 ```
+
 If a table is not in the list of databases to be replicated, it will not be replicated unless the table is specifically
 set to replicate:

 ```graphql
 type Product @table(replicate: true) {
-	id: ID!
-	name: String!
+	id: ID!
+	name: String!
 }
 ```

-Reading hdb_nodes (what we do _to_ the node, not what the node does).
+Reading `hdb_nodes` (what we do _to_ the node, not what the node does).

 The subscription can be set to publish, subscribe, or both.

-
-
-
 # Managing subscriptions

 Subscriptions can be added, updated, or removed through the API.

@@ -39,22 +39,22 @@ To add a single node and create one or more subscriptions use `set_node_replicat

 ```json
 {
-	"operation": "set_node_replication",
-	"node_name": "Node2",
-	"subscriptions": [
-		{
-			"database": "data",
-			"table": "dog",
-			"publish": false,
-			"subscribe": true
-		},
-		{
-			"database": "data",
-			"table": "chicken",
-			"publish": true,
-			"subscribe": true
-		}
-	]
+	"operation": "set_node_replication",
+	"node_name": "Node2",
+	"subscriptions": [
+		{
+			"database": "data",
+			"table": "dog",
+			"publish": false,
+			"subscribe": true
+		},
+		{
+			"database": "data",
+			"table": "chicken",
+			"publish": true,
+			"subscribe": true
+		}
+	]
 }
 ```

@@ -64,16 +64,16 @@ To update one or more subscriptions with a single node you can also use `set_nod

 ```json
 {
-	"operation": "set_node_replication",
-	"node_name": "Node2",
-	"subscriptions": [
-		{
-			"schema": "dev",
-			"table": "dog",
-			"publish": true,
-			"subscribe": true
-		}
-	]
+	"operation": "set_node_replication",
+	"node_name": "Node2",
+	"subscriptions": [
+		{
+			"schema": "dev",
+			"table": "dog",
+			"publish": true,
+			"subscribe": true
+		}
+	]
 }
 ```

@@ -83,37 +83,37 @@ To add or update subscriptions with one or more nodes in one API call use `confi

 ```json
 {
-	"operation": "configure_cluster",
-	"connections": [
-		{
-			"node_name": "Node2",
-			"subscriptions": [
-				{
-					"database": "dev",
-					"table": "chicken",
-					"publish": false,
-					"subscribe": true
-				},
-				{
-					"database": "prod",
-					"table": "dog",
-					"publish": true,
-					"subscribe": true
-				}
-			]
-		},
-		{
-			"node_name": "Node3",
-			"subscriptions": [
-				{
-					"database": "dev",
-					"table": "chicken",
-					"publish": true,
-					"subscribe": false
-				}
-			]
-		}
-	]
+	"operation": "configure_cluster",
+	"connections": [
+		{
+			"node_name": "Node2",
+			"subscriptions": [
+				{
+					"database": "dev",
+					"table": "chicken",
+					"publish": false,
+					"subscribe": true
+				},
+				{
+					"database": "prod",
+					"table": "dog",
+					"publish": true,
+					"subscribe": true
+				}
+			]
+		},
+		{
+			"node_name": "Node3",
+			"subscriptions": [
+				{
+					"database": "dev",
+					"table": "chicken",
+					"publish": true,
+					"subscribe": false
+				}
+			]
+		}
+	]
 }
 ```

@@ -127,17 +127,17 @@ There is an optional property called `start_time` that can be passed in the subs

 ```json
 {
-	"operation": "set_node_replication",
-	"node_name": "Node2",
-	"subscriptions": [
-		{
-			"database": "dev",
-			"table": "dog",
-			"publish": false,
-			"subscribe": true,
-			"start_time": "2022-09-02T20:06:35.993Z"
-		}
- ] + "operation": "set_node_replication", + "node_name": "Node2", + "subscriptions": [ + { + "database": "dev", + "table": "dog", + "publish": false, + "subscribe": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] } ``` @@ -153,8 +153,8 @@ To remove a node and all its subscriptions use `remove_node`. ```json { - "operation":"remove_node", - "node_name":"Node2" + "operation": "remove_node", + "node_name": "Node2" } ``` @@ -164,32 +164,32 @@ To get the status of all connected nodes and see their subscriptions use `cluste ```json { - "node_name": "Node1", - "is_enabled": true, - "connections": [ - { - "node_name": "Node2", - "status": "open", - "ports": { - "clustering": 9932, - "operations_api": 9925 - }, - "latency_ms": 65, - "uptime": "11m 19s", - "subscriptions": [ - { - "schema": "dev", - "table": "dog", - "publish": true, - "subscribe": true - } - ], - "system_info": { - "hdb_version": "4.0.0", - "node_version": "16.17.1", - "platform": "linux" - } - } - ] + "node_name": "Node1", + "is_enabled": true, + "connections": [ + { + "node_name": "Node2", + "status": "open", + "ports": { + "clustering": 9932, + "operations_api": 9925 + }, + "latency_ms": 65, + "uptime": "11m 19s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ], + "system_info": { + "hdb_version": "4.0.0", + "node_version": "16.17.1", + "platform": "linux" + } + } + ] } ``` diff --git a/docs/developers/clustering/naming-a-node.md b/docs/developers/clustering/naming-a-node.md index 113e0603..32054115 100644 --- a/docs/developers/clustering/naming-a-node.md +++ b/docs/developers/clustering/naming-a-node.md @@ -23,8 +23,8 @@ _Note: When making any changes to the `harperdb-config.yaml` file Harper must be ```json { - "operation": "set_configuration", - "clustering_nodeName":"Node1" + "operation": "set_configuration", + "clustering_nodeName": "Node1" } ``` diff --git a/docs/developers/clustering/subscription-overview.md b/docs/developers/clustering/subscription-overview.md index 9f8dbfba..484aa7d1 100644 --- a/docs/developers/clustering/subscription-overview.md +++ b/docs/developers/clustering/subscription-overview.md @@ -6,7 +6,7 @@ _Note: ‘local’ and ‘remote’ will often be referred to. In the context of A subscription consists of: -`database` - the name of the database that the table you are creating the subscription for belongs to. *Note, this was previously referred to as schema and may occasionally still be referenced that way.* +`database` - the name of the database that the table you are creating the subscription for belongs to. _Note, this was previously referred to as schema and may occasionally still be referenced that way._ `table` - the name of the table the subscription will apply to. diff --git a/docs/developers/clustering/things-worth-knowing.md b/docs/developers/clustering/things-worth-knowing.md index 8d864532..1b63c8ac 100644 --- a/docs/developers/clustering/things-worth-knowing.md +++ b/docs/developers/clustering/things-worth-knowing.md @@ -2,21 +2,21 @@ Additional information that will help you define your clustering topology. 
-***
+---

### Transactions

Transactions that are replicated across the cluster are:

-* Insert
-* Update
-* Upsert
-* Delete
-* Bulk loads
-  * CSV data load
-  * CSV file load
-  * CSV URL load
-  * Import from S3
+- Insert
+- Update
+- Upsert
+- Delete
+- Bulk loads
+  - CSV data load
+  - CSV file load
+  - CSV URL load
+  - Import from S3

When adding or updating a node, any databases and tables in the subscription that don’t exist on the remote node will be automatically created.

@@ -24,13 +24,13 @@ When adding or updating a node any databases and tables in the subscription that

Users and roles are not replicated across the cluster.

-***
+---

### Queueing

Harper has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catchup routine is executed to ensure data that was missed, specific to the subscription, is sent/received as defined.

-***
+---

### Topologies

diff --git a/docs/developers/miscellaneous/README.md b/docs/developers/miscellaneous/README.md
index f49b0239..9772780f 100644
--- a/docs/developers/miscellaneous/README.md
+++ b/docs/developers/miscellaneous/README.md
@@ -1,3 +1,3 @@
# Miscellaneous

-This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc. \ No newline at end of file
+This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc.

diff --git a/docs/developers/miscellaneous/google-data-studio.md b/docs/developers/miscellaneous/google-data-studio.md
index dc10f31d..eb066503 100644
--- a/docs/developers/miscellaneous/google-data-studio.md
+++ b/docs/developers/miscellaneous/google-data-studio.md
@@ -19,15 +19,15 @@ Get started by selecting the Harper connector from the [Google Data Studio Partn

5. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer.
6. Check the box for “Allow Bad Certs” if your Harper instance does not have a valid SSL certificate. [Harper Cloud](../../deployments/harper-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using [Harper Cloud](../../deployments/harper-cloud/) or another instance you know should always have valid SSL certificates, do not check this box.
7. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button.
-   * Table will ask you for a Schema and a Table to return all fields of using `SELECT *`.
-   * SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together, and use Harper specific SQL functions, along with the usual power SQL grants.
+   - Table will ask you for a Schema and a Table to return all fields of using `SELECT *`.
+   - SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together, and use Harper-specific SQL functions, along with the usual power that SQL grants.
8. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue.
9.
If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as create new fields from the report view that do calculations on other fields.

## Considerations

-* Both Postman and the [Harper Studio](deployments/harper-cloud/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector’s user.
-  * You may sign out of your current user by going to the instances tab in Harper Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab.
-* It’s highly recommended that you create a read-only user role in Harper Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it.
-* The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set.
-* _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake\_case gets around this.
+- Both Postman and the [Harper Studio](../../deployments/harper-cloud/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector’s user.
+  - You may sign out of your current user by going to the instances tab in Harper Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab.
+- It’s highly recommended that you create a read-only user role in Harper Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it.
+- The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set.
+- _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake_case gets around this.

diff --git a/docs/developers/miscellaneous/query-optimization.md b/docs/developers/miscellaneous/query-optimization.md
index 201c4dee..16faa3cb 100644
--- a/docs/developers/miscellaneous/query-optimization.md
+++ b/docs/developers/miscellaneous/query-optimization.md
@@ -24,10 +24,10 @@ Conditions can be applied to primary key fields or other indexed fields (known a

Harper supports relationships between tables, allowing for "join" queries. These queries are more complex, with potentially larger performance overhead, as more lookups are necessary to connect matched or selected data with other tables. Similar principles apply to conditions which use relationships. Indexed fields and comparators that leverage the ordering are still valuable for performance.
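+
+As a minimal sketch of such a join (reusing the `movies.movie` and `movies.credits` tables from the operations API examples elsewhere in these docs, where `id` is the primary key of `movie` and `movie_id` is the primary key of `credits`), a relationship query might look like:
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT m.title, c.movie_title FROM movies.movie m INNER JOIN movies.credits c ON c.movie_id = m.id"
+}
+```
+
+Because both sides of the `ON` clause are primary keys, each match is resolved through an index rather than a scan.
+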
It is also important that, if a condition on a table is connected to another table's foreign key, the foreign key is indexed as well. Likewise, if a query `select`s data from a related table that is related through a foreign key, that foreign key should be indexed. The same principle of higher cardinality applies here as well: more unique values allow for more efficient lookups.

-
### Sorting
+
Queries can also specify a sort order. This can also significantly impact performance. If a query specifies a sort order on an indexed field, the database can use the index to quickly retrieve the data in the specified order. A sort order used in conjunction with a condition on the same (indexed) field can utilize the index for ordering. However, if the sort order is not on an indexed field, or the query specifies conditions on different fields, Harper will generally need to sort the data after retrieving it, which can be slow for large datasets. The same principles apply to sorting as they do to conditions. Sorting on a primary key is generally faster than sorting on a secondary index, if the condition aligns with the sort order.

### Streaming

-One of the unique and powerful features of Harper's querying functionality is the ability to stream query results. When possible, Harper can return records from a query as they are found, rather than waiting for the entire query to complete. This can significantly improve performance for large queries, as it allows the application to start processing results or sending the initial data before the entire query is complete (improving time-to-first-byte speed, for example). However, using a sort order on a query with conditions that are not on an aligned index requires that the entire query result be loaded in order to perform the sorting, which defeats the streaming benefits. \ No newline at end of file
+One of the unique and powerful features of Harper's querying functionality is the ability to stream query results. When possible, Harper can return records from a query as they are found, rather than waiting for the entire query to complete. This can significantly improve performance for large queries, as it allows the application to start processing results or sending the initial data before the entire query is complete (improving time-to-first-byte speed, for example). However, using a sort order on a query with conditions that are not on an aligned index requires that the entire query result be loaded in order to perform the sorting, which defeats the streaming benefits.
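+
+As a rough illustration (a sketch only; the `product` table and its attributes are hypothetical), ordering by the primary key allows results to stream:
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT id, name FROM data.product ORDER BY id LIMIT 100"
+}
+```
+
+Here `ORDER BY id` follows the primary key index, so records can be returned as they are found; if the same query instead ordered by an unindexed `name` attribute, the entire result would need to be retrieved and sorted before the first record could be sent.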
diff --git a/docs/developers/miscellaneous/sdks.md b/docs/developers/miscellaneous/sdks.md index 35a4a826..0202cb57 100644 --- a/docs/developers/miscellaneous/sdks.md +++ b/docs/developers/miscellaneous/sdks.md @@ -6,16 +6,16 @@ description: >- # SDKs -| SDK/Tool | Description | Installation | -| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------- | -| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A Dot Net Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | -| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A Javascript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | +| SDK/Tool | Description | Installation | +| ------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- | +| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A Dot Net Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | +| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A Javascript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | | [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use Harper as the data source for a Gatsby project at the build time | `npm i -s gatsby-source-harperdb` | | [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The Harper EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` | | [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of Harper API functions with wrappers for an object-oriented interface | `pip3 install harperdb` | | [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A Harper SDK for Flutter | `flutter pub add harperdb` | -| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | +| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | | [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to Harper using the Node-Red platform | `npm i -s node-red-contrib-harperdb` | | [NodeJS SDK](https://www.npmjs.com/package/harperive) | A Harper SDK for NodeJS | `npm i -s harperive` | | [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A Harper SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` | -| [HarperDB Go SDK](https://github.com/HarperDB/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB/sdk-go` | +| [HarperDB Go SDK](https://github.com/HarperDB/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB/sdk-go` | diff --git a/docs/developers/operations-api/advanced-json-sql-examples.md b/docs/developers/operations-api/advanced-json-sql-examples.md index 90287056..2fee2a0c 100644 --- a/docs/developers/operations-api/advanced-json-sql-examples.md +++ 
b/docs/developers/operations-api/advanced-json-sql-examples.md
@@ -1,1776 +1,1771 @@
-# Advanced JSON SQL Examples
+# Advanced JSON SQL Examples

## Create movies database

+
Create a new database called "movies" using the 'create_database' operation.

_Note: Creating a database is optional; if one is not created, Harper will default to using a database named `data`._

### Body

+
```json
{
-    "operation": "create_database",
-    "database": "movies"
+	"operation": "create_database",
+	"database": "movies"
}
```

### Response: 200

+
```json
{
-    "message": "database 'movies' successfully created"
+	"message": "database 'movies' successfully created"
}
```

---

## Create movie Table

+
Creates a new table called "movie" inside the database "movies" using the 'create_table' operation.

### Body

```json
{
-    "operation": "create_table",
-    "database": "movies",
-    "table": "movie",
-    "primary_key": "id"
+	"operation": "create_table",
+	"database": "movies",
+	"table": "movie",
+	"primary_key": "id"
}
```

### Response: 200

+
```json
{
-    "message": "table 'movies.movie' successfully created."
+	"message": "table 'movies.movie' successfully created."
}
```

-
---

## Create credits Table

+
Creates a new table called "credits" inside the database "movies" using the 'create_table' operation.

### Body

```json
{
-    "operation": "create_table",
-    "database": "movies",
-    "table": "credits",
-    "primary_key": "movie_id"
+	"operation": "create_table",
+	"database": "movies",
+	"table": "credits",
+	"primary_key": "movie_id"
}
```

### Response: 200

+
```json
{
-    "message": "table 'movies.credits' successfully created."
+	"message": "table 'movies.credits' successfully created."
}
```

-
---

## Bulk Insert movie Via CSV

+
Inserts data from a hosted CSV file into the "movie" table using the 'csv_url_load' operation.

### Body

```json
{
-    "operation": "csv_url_load",
-    "database": "movies",
-    "table": "movie",
-    "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv"
+	"operation": "csv_url_load",
+	"database": "movies",
+	"table": "movie",
+	"csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv"
}
```

### Response: 200

+
```json
{
-    "message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c"
+	"message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c"
}
```

-
---

## Bulk Insert credits Via CSV

+
Inserts data from a hosted CSV file into the "credits" table using the 'csv_url_load' operation.

### Body

```json
{
-    "operation": "csv_url_load",
-    "database": "movies",
-    "table": "credits",
-    "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv"
+	"operation": "csv_url_load",
+	"database": "movies",
+	"table": "credits",
+	"csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv"
}
```

### Response: 200

+
```json
{
-    "message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c",
-    "job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c"
+	"message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c",
+	"job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c"
}
```

-
---

## View raw data

+
In the following example we will be running expressions on the keywords & production_companies attributes, so for context we are displaying what the raw data looks like.
### Body ```json { - "operation": "sql", - "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10" + "operation": "sql", + "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10" } ``` ### Response: 200 + ```json [ - { - "title": "Ad Astra", - "rank": 1, - "keywords": [ - { - "id": 305, - "name": "moon" - }, - { - "id": 697, - "name": "loss of loved one" - }, - { - "id": 839, - "name": "planet mars" - }, - { - "id": 14626, - "name": "astronaut" - }, - { - "id": 157265, - "name": "moon colony" - }, - { - "id": 162429, - "name": "solar system" - }, - { - "id": 240119, - "name": "father son relationship" - }, - { - "id": 244256, - "name": "near future" - }, - { - "id": 257878, - "name": "planet neptune" - }, - { - "id": 260089, - "name": "space walk" - } - ], - "production_companies": [ - { - "id": 490, - "name": "New Regency Productions", - "origin_country": "" - }, - { - "id": 79963, - "name": "Keep Your Head", - "origin_country": "" - }, - { - "id": 73492, - "name": "MadRiver Pictures", - "origin_country": "" - }, - { - "id": 81, - "name": "Plan B Entertainment", - "origin_country": "US" - }, - { - "id": 30666, - "name": "RT Features", - "origin_country": "BR" - }, - { - "id": 30148, - "name": "Bona Film Group", - "origin_country": "CN" - }, - { - "id": 22213, - "name": "TSG Entertainment", - "origin_country": "US" - } - ] - }, - { - "title": "Extraction", - "rank": 2, - "keywords": [ - { - "id": 3070, - "name": "mercenary" - }, - { - "id": 4110, - "name": "mumbai (bombay), india" - }, - { - "id": 9717, - "name": "based on comic" - }, - { - "id": 9730, - "name": "crime boss" - }, - { - "id": 11107, - "name": "rescue mission" - }, - { - "id": 18712, - "name": "based on graphic novel" - }, - { - "id": 265216, - "name": "dhaka (dacca), bangladesh" - } - ], - "production_companies": [ - { - "id": 106544, - "name": "AGBO", - "origin_country": "US" - }, - { - "id": 109172, - "name": "Thematic Entertainment", - "origin_country": "US" - }, - { - "id": 92029, - "name": "TGIM Films", - "origin_country": "US" - } - ] - }, - { - "title": "To the Beat! 
Back 2 School", - "rank": 3, - "keywords": [ - { - "id": 10873, - "name": "school" - } - ], - "production_companies": [] - }, - { - "title": "Bloodshot", - "rank": 4, - "keywords": [ - { - "id": 2651, - "name": "nanotechnology" - }, - { - "id": 9715, - "name": "superhero" - }, - { - "id": 9717, - "name": "based on comic" - }, - { - "id": 164218, - "name": "psychotronic" - }, - { - "id": 255024, - "name": "shared universe" - }, - { - "id": 258575, - "name": "valiant comics" - } - ], - "production_companies": [ - { - "id": 34, - "name": "Sony Pictures", - "origin_country": "US" - }, - { - "id": 10246, - "name": "Cross Creek Pictures", - "origin_country": "US" - }, - { - "id": 6573, - "name": "Mimran Schur Pictures", - "origin_country": "US" - }, - { - "id": 333, - "name": "Original Film", - "origin_country": "US" - }, - { - "id": 103673, - "name": "The Hideaway Entertainment", - "origin_country": "US" - }, - { - "id": 124335, - "name": "Valiant Entertainment", - "origin_country": "US" - }, - { - "id": 5, - "name": "Columbia Pictures", - "origin_country": "US" - }, - { - "id": 1225, - "name": "One Race", - "origin_country": "US" - }, - { - "id": 30148, - "name": "Bona Film Group", - "origin_country": "CN" - } - ] - }, - { - "title": "The Call of the Wild", - "rank": 5, - "keywords": [ - { - "id": 818, - "name": "based on novel or book" - }, - { - "id": 4542, - "name": "gold rush" - }, - { - "id": 15162, - "name": "dog" - }, - { - "id": 155821, - "name": "sled dogs" - }, - { - "id": 189390, - "name": "yukon" - }, - { - "id": 207928, - "name": "19th century" - }, - { - "id": 259987, - "name": "cgi animation" - }, - { - "id": 263806, - "name": "1890s" - } - ], - "production_companies": [ - { - "id": 787, - "name": "3 Arts Entertainment", - "origin_country": "US" - }, - { - "id": 127928, - "name": "20th Century Studios", - "origin_country": "US" - }, - { - "id": 22213, - "name": "TSG Entertainment", - "origin_country": "US" - } - ] - }, - { - "title": "Sonic the Hedgehog", - "rank": 6, - "keywords": [ - { - "id": 282, - "name": "video game" - }, - { - "id": 6054, - "name": "friendship" - }, - { - "id": 10842, - "name": "good vs evil" - }, - { - "id": 41645, - "name": "based on video game" - }, - { - "id": 167043, - "name": "road movie" - }, - { - "id": 172142, - "name": "farting" - }, - { - "id": 188933, - "name": "bar fight" - }, - { - "id": 226967, - "name": "amistad" - }, - { - "id": 245230, - "name": "live action remake" - }, - { - "id": 258111, - "name": "fantasy" - }, - { - "id": 260223, - "name": "videojuego" - } - ], - "production_companies": [ - { - "id": 333, - "name": "Original Film", - "origin_country": "US" - }, - { - "id": 10644, - "name": "Blur Studios", - "origin_country": "US" - }, - { - "id": 77884, - "name": "Marza Animation Planet", - "origin_country": "JP" - }, - { - "id": 4, - "name": "Paramount", - "origin_country": "US" - }, - { - "id": 113750, - "name": "SEGA", - "origin_country": "JP" - }, - { - "id": 100711, - "name": "DJ2 Entertainment", - "origin_country": "" - }, - { - "id": 24955, - "name": "Paramount Animation", - "origin_country": "US" - } - ] - }, - { - "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", - "rank": 7, - "keywords": [ - { - "id": 849, - "name": "dc comics" - }, - { - "id": 9717, - "name": "based on comic" - }, - { - "id": 187056, - "name": "woman director" - }, - { - "id": 229266, - "name": "dc extended universe" - } - ], - "production_companies": [ - { - "id": 9993, - "name": "DC Entertainment", - "origin_country": 
"US" - }, - { - "id": 82968, - "name": "LuckyChap Entertainment", - "origin_country": "GB" - }, - { - "id": 103462, - "name": "Kroll & Co Entertainment", - "origin_country": "US" - }, - { - "id": 174, - "name": "Warner Bros. Pictures", - "origin_country": "US" - }, - { - "id": 429, - "name": "DC Comics", - "origin_country": "US" - }, - { - "id": 128064, - "name": "DC Films", - "origin_country": "US" - }, - { - "id": 101831, - "name": "Clubhouse Pictures", - "origin_country": "US" - } - ] - }, - { - "title": "Justice League Dark: Apokolips War", - "rank": 8, - "keywords": [ - { - "id": 849, - "name": "dc comics" - } - ], - "production_companies": [ - { - "id": 2785, - "name": "Warner Bros. Animation", - "origin_country": "US" - }, - { - "id": 9993, - "name": "DC Entertainment", - "origin_country": "US" - }, - { - "id": 429, - "name": "DC Comics", - "origin_country": "US" - } - ] - }, - { - "title": "Parasite", - "rank": 9, - "keywords": [ - { - "id": 1353, - "name": "underground" - }, - { - "id": 5318, - "name": "seoul" - }, - { - "id": 5732, - "name": "birthday party" - }, - { - "id": 5752, - "name": "private lessons" - }, - { - "id": 9866, - "name": "basement" - }, - { - "id": 10453, - "name": "con artist" - }, - { - "id": 11935, - "name": "working class" - }, - { - "id": 12565, - "name": "psychological thriller" - }, - { - "id": 13126, - "name": "limousine driver" - }, - { - "id": 14514, - "name": "class differences" - }, - { - "id": 14864, - "name": "rich poor" - }, - { - "id": 17997, - "name": "housekeeper" - }, - { - "id": 18015, - "name": "tutor" - }, - { - "id": 18035, - "name": "family" - }, - { - "id": 33421, - "name": "crime family" - }, - { - "id": 173272, - "name": "flood" - }, - { - "id": 188861, - "name": "smell" - }, - { - "id": 198673, - "name": "unemployed" - }, - { - "id": 237462, - "name": "wealthy family" - } - ], - "production_companies": [ - { - "id": 7036, - "name": "CJ Entertainment", - "origin_country": "KR" - }, - { - "id": 4399, - "name": "Barunson E&A", - "origin_country": "KR" - } - ] - }, - { - "title": "Star Wars: The Rise of Skywalker", - "rank": 10, - "keywords": [ - { - "id": 161176, - "name": "space opera" - } - ], - "production_companies": [ - { - "id": 1, - "name": "Lucasfilm", - "origin_country": "US" - }, - { - "id": 11461, - "name": "Bad Robot", - "origin_country": "US" - }, - { - "id": 2, - "name": "Walt Disney Pictures", - "origin_country": "US" - }, - { - "id": 120404, - "name": "British Film Commission", - "origin_country": "" - } - ] - } + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + { + "id": 305, + "name": "moon" + }, + { + "id": 697, + "name": "loss of loved one" + }, + { + "id": 839, + "name": "planet mars" + }, + { + "id": 14626, + "name": "astronaut" + }, + { + "id": 157265, + "name": "moon colony" + }, + { + "id": 162429, + "name": "solar system" + }, + { + "id": 240119, + "name": "father son relationship" + }, + { + "id": 244256, + "name": "near future" + }, + { + "id": 257878, + "name": "planet neptune" + }, + { + "id": 260089, + "name": "space walk" + } + ], + "production_companies": [ + { + "id": 490, + "name": "New Regency Productions", + "origin_country": "" + }, + { + "id": 79963, + "name": "Keep Your Head", + "origin_country": "" + }, + { + "id": 73492, + "name": "MadRiver Pictures", + "origin_country": "" + }, + { + "id": 81, + "name": "Plan B Entertainment", + "origin_country": "US" + }, + { + "id": 30666, + "name": "RT Features", + "origin_country": "BR" + }, + { + "id": 30148, + "name": "Bona Film Group", + 
"origin_country": "CN" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + { + "id": 3070, + "name": "mercenary" + }, + { + "id": 4110, + "name": "mumbai (bombay), india" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 9730, + "name": "crime boss" + }, + { + "id": 11107, + "name": "rescue mission" + }, + { + "id": 18712, + "name": "based on graphic novel" + }, + { + "id": 265216, + "name": "dhaka (dacca), bangladesh" + } + ], + "production_companies": [ + { + "id": 106544, + "name": "AGBO", + "origin_country": "US" + }, + { + "id": 109172, + "name": "Thematic Entertainment", + "origin_country": "US" + }, + { + "id": 92029, + "name": "TGIM Films", + "origin_country": "US" + } + ] + }, + { + "title": "To the Beat! Back 2 School", + "rank": 3, + "keywords": [ + { + "id": 10873, + "name": "school" + } + ], + "production_companies": [] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + { + "id": 2651, + "name": "nanotechnology" + }, + { + "id": 9715, + "name": "superhero" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 164218, + "name": "psychotronic" + }, + { + "id": 255024, + "name": "shared universe" + }, + { + "id": 258575, + "name": "valiant comics" + } + ], + "production_companies": [ + { + "id": 34, + "name": "Sony Pictures", + "origin_country": "US" + }, + { + "id": 10246, + "name": "Cross Creek Pictures", + "origin_country": "US" + }, + { + "id": 6573, + "name": "Mimran Schur Pictures", + "origin_country": "US" + }, + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 103673, + "name": "The Hideaway Entertainment", + "origin_country": "US" + }, + { + "id": 124335, + "name": "Valiant Entertainment", + "origin_country": "US" + }, + { + "id": 5, + "name": "Columbia Pictures", + "origin_country": "US" + }, + { + "id": 1225, + "name": "One Race", + "origin_country": "US" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + } + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + { + "id": 818, + "name": "based on novel or book" + }, + { + "id": 4542, + "name": "gold rush" + }, + { + "id": 15162, + "name": "dog" + }, + { + "id": 155821, + "name": "sled dogs" + }, + { + "id": 189390, + "name": "yukon" + }, + { + "id": 207928, + "name": "19th century" + }, + { + "id": 259987, + "name": "cgi animation" + }, + { + "id": 263806, + "name": "1890s" + } + ], + "production_companies": [ + { + "id": 787, + "name": "3 Arts Entertainment", + "origin_country": "US" + }, + { + "id": 127928, + "name": "20th Century Studios", + "origin_country": "US" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + { + "id": 282, + "name": "video game" + }, + { + "id": 6054, + "name": "friendship" + }, + { + "id": 10842, + "name": "good vs evil" + }, + { + "id": 41645, + "name": "based on video game" + }, + { + "id": 167043, + "name": "road movie" + }, + { + "id": 172142, + "name": "farting" + }, + { + "id": 188933, + "name": "bar fight" + }, + { + "id": 226967, + "name": "amistad" + }, + { + "id": 245230, + "name": "live action remake" + }, + { + "id": 258111, + "name": "fantasy" + }, + { + "id": 260223, + "name": "videojuego" + } + ], + "production_companies": [ + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 10644, + "name": "Blur Studios", + 
"origin_country": "US" + }, + { + "id": 77884, + "name": "Marza Animation Planet", + "origin_country": "JP" + }, + { + "id": 4, + "name": "Paramount", + "origin_country": "US" + }, + { + "id": 113750, + "name": "SEGA", + "origin_country": "JP" + }, + { + "id": 100711, + "name": "DJ2 Entertainment", + "origin_country": "" + }, + { + "id": 24955, + "name": "Paramount Animation", + "origin_country": "US" + } + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + { + "id": 849, + "name": "dc comics" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 187056, + "name": "woman director" + }, + { + "id": 229266, + "name": "dc extended universe" + } + ], + "production_companies": [ + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 82968, + "name": "LuckyChap Entertainment", + "origin_country": "GB" + }, + { + "id": 103462, + "name": "Kroll & Co Entertainment", + "origin_country": "US" + }, + { + "id": 174, + "name": "Warner Bros. Pictures", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + }, + { + "id": 128064, + "name": "DC Films", + "origin_country": "US" + }, + { + "id": 101831, + "name": "Clubhouse Pictures", + "origin_country": "US" + } + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + { + "id": 849, + "name": "dc comics" + } + ], + "production_companies": [ + { + "id": 2785, + "name": "Warner Bros. Animation", + "origin_country": "US" + }, + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + } + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + { + "id": 1353, + "name": "underground" + }, + { + "id": 5318, + "name": "seoul" + }, + { + "id": 5732, + "name": "birthday party" + }, + { + "id": 5752, + "name": "private lessons" + }, + { + "id": 9866, + "name": "basement" + }, + { + "id": 10453, + "name": "con artist" + }, + { + "id": 11935, + "name": "working class" + }, + { + "id": 12565, + "name": "psychological thriller" + }, + { + "id": 13126, + "name": "limousine driver" + }, + { + "id": 14514, + "name": "class differences" + }, + { + "id": 14864, + "name": "rich poor" + }, + { + "id": 17997, + "name": "housekeeper" + }, + { + "id": 18015, + "name": "tutor" + }, + { + "id": 18035, + "name": "family" + }, + { + "id": 33421, + "name": "crime family" + }, + { + "id": 173272, + "name": "flood" + }, + { + "id": 188861, + "name": "smell" + }, + { + "id": 198673, + "name": "unemployed" + }, + { + "id": 237462, + "name": "wealthy family" + } + ], + "production_companies": [ + { + "id": 7036, + "name": "CJ Entertainment", + "origin_country": "KR" + }, + { + "id": 4399, + "name": "Barunson E&A", + "origin_country": "KR" + } + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + { + "id": 161176, + "name": "space opera" + } + ], + "production_companies": [ + { + "id": 1, + "name": "Lucasfilm", + "origin_country": "US" + }, + { + "id": 11461, + "name": "Bad Robot", + "origin_country": "US" + }, + { + "id": 2, + "name": "Walt Disney Pictures", + "origin_country": "US" + }, + { + "id": 120404, + "name": "British Film Commission", + "origin_country": "" + } + ] + } ] ``` - --- ## Simple search_json call -This query uses search_json to convert the keywords object array to a simple string array. 
The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. + +This query uses search_json to convert the keywords object array to a simple string array. The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. ### Body ```json { - "operation": "sql", - "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" + "operation": "sql", + "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" } ``` ### Response: 200 + ```json [ - { - "title": "Ad Astra", - "rank": 1, - "keywords": [ - "moon", - "loss of loved one", - "planet mars", - "astronaut", - "moon colony", - "solar system", - "father son relationship", - "near future", - "planet neptune", - "space walk" - ] - }, - { - "title": "Extraction", - "rank": 2, - "keywords": [ - "mercenary", - "mumbai (bombay), india", - "based on comic", - "crime boss", - "rescue mission", - "based on graphic novel", - "dhaka (dacca), bangladesh" - ] - }, - { - "title": "To the Beat! Back 2 School", - "rank": 3, - "keywords": [ - "school" - ] - }, - { - "title": "Bloodshot", - "rank": 4, - "keywords": [ - "nanotechnology", - "superhero", - "based on comic", - "psychotronic", - "shared universe", - "valiant comics" - ] - }, - { - "title": "The Call of the Wild", - "rank": 5, - "keywords": [ - "based on novel or book", - "gold rush", - "dog", - "sled dogs", - "yukon", - "19th century", - "cgi animation", - "1890s" - ] - }, - { - "title": "Sonic the Hedgehog", - "rank": 6, - "keywords": [ - "video game", - "friendship", - "good vs evil", - "based on video game", - "road movie", - "farting", - "bar fight", - "amistad", - "live action remake", - "fantasy", - "videojuego" - ] - }, - { - "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", - "rank": 7, - "keywords": [ - "dc comics", - "based on comic", - "woman director", - "dc extended universe" - ] - }, - { - "title": "Justice League Dark: Apokolips War", - "rank": 8, - "keywords": [ - "dc comics" - ] - }, - { - "title": "Parasite", - "rank": 9, - "keywords": [ - "underground", - "seoul", - "birthday party", - "private lessons", - "basement", - "con artist", - "working class", - "psychological thriller", - "limousine driver", - "class differences", - "rich poor", - "housekeeper", - "tutor", - "family", - "crime family", - "flood", - "smell", - "unemployed", - "wealthy family" - ] - }, - { - "title": "Star Wars: The Rise of Skywalker", - "rank": 10, - "keywords": [ - "space opera" - ] - } + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + "moon", + "loss of loved one", + "planet mars", + "astronaut", + "moon colony", + "solar system", + "father son relationship", + "near future", + "planet neptune", + "space walk" + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + "mercenary", + "mumbai (bombay), india", + "based on comic", + "crime boss", + "rescue mission", + "based on graphic novel", + "dhaka (dacca), bangladesh" + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": ["school"] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": ["nanotechnology", "superhero", "based on comic", "psychotronic", "shared universe", "valiant comics"] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + "based on novel or book", + "gold rush", + "dog", + "sled dogs", + "yukon", + "19th century", + "cgi animation", + "1890s" + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + "video game", + "friendship", + "good vs evil", + "based on video game", + "road movie", + "farting", + "bar fight", + "amistad", + "live action remake", + "fantasy", + "videojuego" + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": ["dc comics", "based on comic", "woman director", "dc extended universe"] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": ["dc comics"] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + "underground", + "seoul", + "birthday party", + "private lessons", + "basement", + "con artist", + "working class", + "psychological thriller", + "limousine driver", + "class differences", + "rich poor", + "housekeeper", + "tutor", + "family", + "crime family", + "flood", + "smell", + "unemployed", + "wealthy family" + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": ["space opera"] + } ] ``` - --- ## Use search_json in a where clause -This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". + +This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". 
### Body ```json { - "operation": "sql", - "sql": "SELECT title, release_date FROM movies.movie where search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" + "operation": "sql", + "sql": "SELECT title, release_date FROM movies.movie where search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" } ``` ### Response: 200 + ```json [ - { - "title": "Iron Man", - "release_date": "2008-04-30" - }, - { - "title": "The Incredible Hulk", - "release_date": "2008-06-12" - }, - { - "title": "Iron Man 2", - "release_date": "2010-04-28" - }, - { - "title": "Thor", - "release_date": "2011-04-21" - }, - { - "title": "Captain America: The First Avenger", - "release_date": "2011-07-22" - }, - { - "title": "Marvel One-Shot: The Consultant", - "release_date": "2011-09-12" - }, - { - "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", - "release_date": "2011-10-25" - }, - { - "title": "The Avengers", - "release_date": "2012-04-25" - }, - { - "title": "Marvel One-Shot: Item 47", - "release_date": "2012-09-13" - }, - { - "title": "Iron Man 3", - "release_date": "2013-04-18" - }, - { - "title": "Marvel One-Shot: Agent Carter", - "release_date": "2013-09-08" - }, - { - "title": "Thor: The Dark World", - "release_date": "2013-10-29" - }, - { - "title": "Marvel One-Shot: All Hail the King", - "release_date": "2014-02-04" - }, - { - "title": "Marvel Studios: Assembling a Universe", - "release_date": "2014-03-18" - }, - { - "title": "Captain America: The Winter Soldier", - "release_date": "2014-03-20" - }, - { - "title": "Guardians of the Galaxy", - "release_date": "2014-07-30" - }, - { - "title": "Avengers: Age of Ultron", - "release_date": "2015-04-22" - }, - { - "title": "Ant-Man", - "release_date": "2015-07-14" - }, - { - "title": "Captain America: Civil War", - "release_date": "2016-04-27" - }, - { - "title": "Team Thor", - "release_date": "2016-08-28" - }, - { - "title": "Doctor Strange", - "release_date": "2016-10-25" - }, - { - "title": "Guardians of the Galaxy Vol. 2", - "release_date": "2017-04-19" - }, - { - "title": "Spider-Man: Homecoming", - "release_date": "2017-07-05" - }, - { - "title": "Thor: Ragnarok", - "release_date": "2017-10-25" - }, - { - "title": "Black Panther", - "release_date": "2018-02-13" - }, - { - "title": "Avengers: Infinity War", - "release_date": "2018-04-25" - }, - { - "title": "Ant-Man and the Wasp", - "release_date": "2018-07-04" - }, - { - "title": "Captain Marvel", - "release_date": "2019-03-06" - }, - { - "title": "Avengers: Endgame", - "release_date": "2019-04-24" - }, - { - "title": "Spider-Man: Far from Home", - "release_date": "2019-06-28" - }, - { - "title": "Black Widow", - "release_date": "2020-10-28" - }, - { - "title": "Untitled Spider-Man 3", - "release_date": "2021-11-04" - }, - { - "title": "Thor: Love and Thunder", - "release_date": "2022-02-10" - }, - { - "title": "Doctor Strange in the Multiverse of Madness", - "release_date": "2022-03-23" - }, - { - "title": "Untitled Marvel Project (3)", - "release_date": "2022-07-29" - }, - { - "title": "Guardians of the Galaxy Vol. 
3", - "release_date": "2023-02-16" - } + { + "title": "Iron Man", + "release_date": "2008-04-30" + }, + { + "title": "The Incredible Hulk", + "release_date": "2008-06-12" + }, + { + "title": "Iron Man 2", + "release_date": "2010-04-28" + }, + { + "title": "Thor", + "release_date": "2011-04-21" + }, + { + "title": "Captain America: The First Avenger", + "release_date": "2011-07-22" + }, + { + "title": "Marvel One-Shot: The Consultant", + "release_date": "2011-09-12" + }, + { + "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", + "release_date": "2011-10-25" + }, + { + "title": "The Avengers", + "release_date": "2012-04-25" + }, + { + "title": "Marvel One-Shot: Item 47", + "release_date": "2012-09-13" + }, + { + "title": "Iron Man 3", + "release_date": "2013-04-18" + }, + { + "title": "Marvel One-Shot: Agent Carter", + "release_date": "2013-09-08" + }, + { + "title": "Thor: The Dark World", + "release_date": "2013-10-29" + }, + { + "title": "Marvel One-Shot: All Hail the King", + "release_date": "2014-02-04" + }, + { + "title": "Marvel Studios: Assembling a Universe", + "release_date": "2014-03-18" + }, + { + "title": "Captain America: The Winter Soldier", + "release_date": "2014-03-20" + }, + { + "title": "Guardians of the Galaxy", + "release_date": "2014-07-30" + }, + { + "title": "Avengers: Age of Ultron", + "release_date": "2015-04-22" + }, + { + "title": "Ant-Man", + "release_date": "2015-07-14" + }, + { + "title": "Captain America: Civil War", + "release_date": "2016-04-27" + }, + { + "title": "Team Thor", + "release_date": "2016-08-28" + }, + { + "title": "Doctor Strange", + "release_date": "2016-10-25" + }, + { + "title": "Guardians of the Galaxy Vol. 2", + "release_date": "2017-04-19" + }, + { + "title": "Spider-Man: Homecoming", + "release_date": "2017-07-05" + }, + { + "title": "Thor: Ragnarok", + "release_date": "2017-10-25" + }, + { + "title": "Black Panther", + "release_date": "2018-02-13" + }, + { + "title": "Avengers: Infinity War", + "release_date": "2018-04-25" + }, + { + "title": "Ant-Man and the Wasp", + "release_date": "2018-07-04" + }, + { + "title": "Captain Marvel", + "release_date": "2019-03-06" + }, + { + "title": "Avengers: Endgame", + "release_date": "2019-04-24" + }, + { + "title": "Spider-Man: Far from Home", + "release_date": "2019-06-28" + }, + { + "title": "Black Widow", + "release_date": "2020-10-28" + }, + { + "title": "Untitled Spider-Man 3", + "release_date": "2021-11-04" + }, + { + "title": "Thor: Love and Thunder", + "release_date": "2022-02-10" + }, + { + "title": "Doctor Strange in the Multiverse of Madness", + "release_date": "2022-03-23" + }, + { + "title": "Untitled Marvel Project (3)", + "release_date": "2022-07-29" + }, + { + "title": "Guardians of the Galaxy Vol. 3", + "release_date": "2023-02-16" + } ] ``` - --- ## Use search_json to show the movies with the largest casts -This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the results. The cast attribute holds an object array of details around the cast of a movie. We use the expression '$count(id)' that counts each id and returns the value back which we alias in SQL as cast_size which in turn gets used to sort the rows. + +This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the results. The cast attribute holds an object array of details around the cast of a movie. 
We use the expression '$count(id)', which counts each id and returns the value; we alias it in SQL as cast_size, which in turn is used to sort the rows.

### Body

```json
{
-    "operation": "sql",
-    "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10"
+	"operation": "sql",
+	"sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10"
}
```

### Response: 200

+
```json
[
-    {
-        "movie_title": "Around the World in Eighty Days",
-        "cast_size": 312
-    },
-    {
-        "movie_title": "And the Oscar Goes To...",
-        "cast_size": 259
-    },
-    {
-        "movie_title": "Rock of Ages",
-        "cast_size": 223
-    },
-    {
-        "movie_title": "Mr. Smith Goes to Washington",
-        "cast_size": 213
-    },
-    {
-        "movie_title": "Les Misérables",
-        "cast_size": 208
-    },
-    {
-        "movie_title": "Jason Bourne",
-        "cast_size": 201
-    },
-    {
-        "movie_title": "The Muppets",
-        "cast_size": 191
-    },
-    {
-        "movie_title": "You Don't Mess with the Zohan",
-        "cast_size": 183
-    },
-    {
-        "movie_title": "The Irishman",
-        "cast_size": 173
-    },
-    {
-        "movie_title": "Spider-Man: Far from Home",
-        "cast_size": 173
-    }
+	{
+		"movie_title": "Around the World in Eighty Days",
+		"cast_size": 312
+	},
+	{
+		"movie_title": "And the Oscar Goes To...",
+		"cast_size": 259
+	},
+	{
+		"movie_title": "Rock of Ages",
+		"cast_size": 223
+	},
+	{
+		"movie_title": "Mr. Smith Goes to Washington",
+		"cast_size": 213
+	},
+	{
+		"movie_title": "Les Misérables",
+		"cast_size": 208
+	},
+	{
+		"movie_title": "Jason Bourne",
+		"cast_size": 201
+	},
+	{
+		"movie_title": "The Muppets",
+		"cast_size": 191
+	},
+	{
+		"movie_title": "You Don't Mess with the Zohan",
+		"cast_size": 183
+	},
+	{
+		"movie_title": "The Irishman",
+		"cast_size": 173
+	},
+	{
+		"movie_title": "Spider-Man: Far from Home",
+		"cast_size": 173
+	}
]
```

-
---

## search_json as a condition, in a select with a table join

-This example shows how we can use SEARCH_JSON to find movies where at least of 2 our favorite actors from Marvel films have acted together then list the movie, its overview, release date, and the actors names and their characters. The WHERE clause performs a count on credits.cast attribute that have the matching actors. The SELECT performs the same filter on the cast attribute and performs a transform on each object to just return the actor's name and their character.
+
+This example shows how we can use SEARCH_JSON to find movies where at least 2 of our favorite actors from Marvel films have acted together, then list the movie, its overview, release date, and the actors' names and their characters. The WHERE clause counts the entries in the credits.cast attribute that match those actors. The SELECT performs the same filter on the cast attribute and transforms each matching object to return just the actor's name and their character.

### Body

```json
{
-    "operation": "sql",
-    "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L.
Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2" + "operation": "sql", + "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2" } ``` ### Response: 200 + ```json [ - { - "title": "Out of Sight", - "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.", - "release_date": "1998-06-26", - "characters": [ - { - "actor": "Don Cheadle", - "character": "Maurice Miller" - }, - { - "actor": "Samuel L. Jackson", - "character": "Hejira Henry (uncredited)" - } - ] - }, - { - "title": "Iron Man", - "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.", - "release_date": "2008-04-30", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Virginia \"Pepper\" Potts" - }, - { - "actor": "Clark Gregg", - "character": "Phil Coulson" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury (uncredited)" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury" - } - ] - }, - { - "title": "Captain America: The First Avenger", - "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", - "release_date": "2011-07-22", - "characters": [ - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury" - } - ] - }, - { - "title": "In Good Company", - "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. 
Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", - "release_date": "2004-12-29", - "characters": [ - { - "actor": "Scarlett Johansson", - "character": "Alex Foreman" - }, - { - "actor": "Clark Gregg", - "character": "Mark Steckle" - } - ] - }, - { - "title": "Zodiac", - "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", - "release_date": "2007-03-02", - "characters": [ - { - "actor": "Mark Ruffalo", - "character": "Dave Toschi" - }, - { - "actor": "Robert Downey Jr.", - "character": "Paul Avery" - } - ] - }, - { - "title": "Hard Eight", - "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", - "release_date": "1996-02-28", - "characters": [ - { - "actor": "Gwyneth Paltrow", - "character": "Clementine" - }, - { - "actor": "Samuel L. Jackson", - "character": "Jimmy" - } - ] - }, - { - "title": "The Spirit", - "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", - "release_date": "2008-12-25", - "characters": [ - { - "actor": "Scarlett Johansson", - "character": "Silken Floss" - }, - { - "actor": "Samuel L. Jackson", - "character": "Octopuss" - } - ] - }, - { - "title": "S.W.A.T.", - "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", - "release_date": "2003-08-08", - "characters": [ - { - "actor": "Samuel L. Jackson", - "character": "Sgt. Dan 'Hondo' Harrelson" - }, - { - "actor": "Jeremy Renner", - "character": "Brian Gamble" - } - ] - }, - { - "title": "Iron Man 2", - "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", - "release_date": "2010-04-28", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Virginia \"Pepper\" Potts" - }, - { - "actor": "Don Cheadle", - "character": "James \"Rhodey\" Rhodes / War Machine" - }, - { - "actor": "Scarlett Johansson", - "character": "Natalie Rushman / Natasha Romanoff / Black Widow" - }, - { - "actor": "Samuel L. 
Jackson", - "character": "Nick Fury" - }, - { - "actor": "Clark Gregg", - "character": "Phil Coulson" - } - ] - }, - { - "title": "Thor", - "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", - "release_date": "2011-04-21", - "characters": [ - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Clark Gregg", - "character": "Phil Coulson" - }, - { - "actor": "Jeremy Renner", - "character": "Clint Barton / Hawkeye (uncredited)" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury (uncredited)" - } - ] - }, - { - "title": "View from the Top", - "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", - "release_date": "2003-03-21", - "characters": [ - { - "actor": "Gwyneth Paltrow", - "character": "Donna" - }, - { - "actor": "Mark Ruffalo", - "character": "Ted Stewart" - } - ] - }, - { - "title": "The Nanny Diaries", - "overview": "A college graduate goes to work as a nanny for a rich New York family. Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", - "release_date": "2007-08-24", - "characters": [ - { - "actor": "Scarlett Johansson", - "character": "Annie Braddock" - }, - { - "actor": "Chris Evans", - "character": "Hayden \"Harvard Hottie\"" - } - ] - }, - { - "title": "The Perfect Score", - "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", - "release_date": "2004-01-30", - "characters": [ - { - "actor": "Chris Evans", - "character": "Kyle" - }, - { - "actor": "Scarlett Johansson", - "character": "Francesca Curtis" - } - ] - }, - { - "title": "The Avengers", - "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. Spanning the globe, a daring recruitment effort begins!", - "release_date": "2012-04-25", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner / The Hulk" - }, - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - }, - { - "actor": "Jeremy Renner", - "character": "Clint Barton / Hawkeye" - }, - { - "actor": "Samuel L. 
Jackson", - "character": "Nick Fury" - }, - { - "actor": "Clark Gregg", - "character": "Phil Coulson" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Virginia \"Pepper\" Potts" - } - ] - }, - { - "title": "Iron Man 3", - "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", - "release_date": "2013-04-18", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Virginia \"Pepper\" Potts" - }, - { - "actor": "Don Cheadle", - "character": "James \"Rhodey\" Rhodes / Iron Patriot" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner (uncredited)" - } - ] - }, - { - "title": "Marvel One-Shot: The Consultant", - "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", - "release_date": "2011-09-12", - "characters": [ - { - "actor": "Clark Gregg", - "character": "Phil Coulson" - }, - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark (archive footage)" - } - ] - }, - { - "title": "Thor: The Dark World", - "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", - "release_date": "2013-10-29", - "characters": [ - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Chris Evans", - "character": "Loki as Captain America (uncredited)" - } - ] - }, - { - "title": "Avengers: Age of Ultron", - "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", - "release_date": "2015-04-22", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner / The Hulk" - }, - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - }, - { - "actor": "Jeremy Renner", - "character": "Clint Barton / Hawkeye" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury" - }, - { - "actor": "Don Cheadle", - "character": "James \"Rhodey\" Rhodes / War Machine" - } - ] - }, - { - "title": "Captain America: The Winter Soldier", - "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. 
Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", - "release_date": "2014-03-20", - "characters": [ - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - } - ] - }, - { - "title": "Thanks for Sharing", - "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", - "release_date": "2013-09-19", - "characters": [ - { - "actor": "Mark Ruffalo", - "character": "Adam" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Phoebe" - } - ] - }, - { - "title": "Chef", - "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", - "release_date": "2014-05-08", - "characters": [ - { - "actor": "Scarlett Johansson", - "character": "Molly" - }, - { - "actor": "Robert Downey Jr.", - "character": "Marvin" - } - ] - }, - { - "title": "Marvel Studios: Assembling a Universe", - "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", - "release_date": "2014-03-18", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Himself / Tony Stark / Iron Man" - }, - { - "actor": "Chris Hemsworth", - "character": "Himself / Thor" - }, - { - "actor": "Chris Evans", - "character": "Himself / Steve Rogers / Captain America" - }, - { - "actor": "Mark Ruffalo", - "character": "Himself / Bruce Banner / Hulk" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Herself" - }, - { - "actor": "Clark Gregg", - "character": "Himself" - }, - { - "actor": "Samuel L. Jackson", - "character": "Himself" - }, - { - "actor": "Scarlett Johansson", - "character": "Herself" - }, - { - "actor": "Jeremy Renner", - "character": "Himself" - } - ] - }, - { - "title": "Captain America: Civil War", - "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. 
This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", - "release_date": "2016-04-27", - "characters": [ - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - }, - { - "actor": "Don Cheadle", - "character": "James \"Rhodey\" Rhodes / War Machine" - }, - { - "actor": "Jeremy Renner", - "character": "Clint Barton / Hawkeye" - } - ] - }, - { - "title": "Thor: Ragnarok", - "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", - "release_date": "2017-10-25", - "characters": [ - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner / Hulk" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" - } - ] - }, - { - "title": "Avengers: Endgame", - "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", - "release_date": "2019-04-24", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner / Hulk" - }, - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - }, - { - "actor": "Jeremy Renner", - "character": "Clint Barton / Hawkeye" - }, - { - "actor": "Don Cheadle", - "character": "James Rhodes / War Machine" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Pepper Potts" - }, - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury" - } - ] - }, - { - "title": "Avengers: Infinity War", - "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", - "release_date": "2018-04-25", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - }, - { - "actor": "Don Cheadle", - "character": "James \"Rhodey\" Rhodes / War Machine" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Virginia \"Pepper\" Potts" - }, - { - "actor": "Samuel L. 
Jackson", - "character": "Nick Fury (uncredited)" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner / The Hulk" - } - ] - }, - { - "title": "Captain Marvel", - "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", - "release_date": "2019-03-06", - "characters": [ - { - "actor": "Samuel L. Jackson", - "character": "Nick Fury" - }, - { - "actor": "Clark Gregg", - "character": "Agent Phil Coulson" - }, - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America (uncredited)" - }, - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow (uncredited)" - }, - { - "actor": "Don Cheadle", - "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner / The Hulk (uncredited)" - } - ] - }, - { - "title": "Spider-Man: Homecoming", - "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", - "release_date": "2017-07-05", - "characters": [ - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - }, - { - "actor": "Gwyneth Paltrow", - "character": "Virginia \"Pepper\" Potts" - }, - { - "actor": "Chris Evans", - "character": "Steve Rogers / Captain America" - } - ] - }, - { - "title": "Team Thor", - "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", - "release_date": "2016-08-28", - "characters": [ - { - "actor": "Chris Hemsworth", - "character": "Thor Odinson" - }, - { - "actor": "Mark Ruffalo", - "character": "Bruce Banner" - } - ] - }, - { - "title": "Black Widow", - "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", - "release_date": "2020-10-28", - "characters": [ - { - "actor": "Scarlett Johansson", - "character": "Natasha Romanoff / Black Widow" - }, - { - "actor": "Robert Downey Jr.", - "character": "Tony Stark / Iron Man" - } - ] - } + { + "title": "Out of Sight", + "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.", + "release_date": "1998-06-26", + "characters": [ + { + "actor": "Don Cheadle", + "character": "Maurice Miller" + }, + { + "actor": "Samuel L. 
Jackson", + "character": "Hejira Henry (uncredited)" + } + ] + }, + { + "title": "Iron Man", + "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.", + "release_date": "2008-04-30", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Captain America: The First Avenger", + "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", + "release_date": "2011-07-22", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "In Good Company", + "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", + "release_date": "2004-12-29", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Alex Foreman" + }, + { + "actor": "Clark Gregg", + "character": "Mark Steckle" + } + ] + }, + { + "title": "Zodiac", + "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", + "release_date": "2007-03-02", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Dave Toschi" + }, + { + "actor": "Robert Downey Jr.", + "character": "Paul Avery" + } + ] + }, + { + "title": "Hard Eight", + "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", + "release_date": "1996-02-28", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Clementine" + }, + { + "actor": "Samuel L. Jackson", + "character": "Jimmy" + } + ] + }, + { + "title": "The Spirit", + "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", + "release_date": "2008-12-25", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Silken Floss" + }, + { + "actor": "Samuel L. Jackson", + "character": "Octopuss" + } + ] + }, + { + "title": "S.W.A.T.", + "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. 
Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", + "release_date": "2003-08-08", + "characters": [ + { + "actor": "Samuel L. Jackson", + "character": "Sgt. Dan 'Hondo' Harrelson" + }, + { + "actor": "Jeremy Renner", + "character": "Brian Gamble" + } + ] + }, + { + "title": "Iron Man 2", + "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", + "release_date": "2010-04-28", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Scarlett Johansson", + "character": "Natalie Rushman / Natasha Romanoff / Black Widow" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + } + ] + }, + { + "title": "Thor", + "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", + "release_date": "2011-04-21", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + } + ] + }, + { + "title": "View from the Top", + "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", + "release_date": "2003-03-21", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Donna" + }, + { + "actor": "Mark Ruffalo", + "character": "Ted Stewart" + } + ] + }, + { + "title": "The Nanny Diaries", + "overview": "A college graduate goes to work as a nanny for a rich New York family. 
Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", + "release_date": "2007-08-24", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Annie Braddock" + }, + { + "actor": "Chris Evans", + "character": "Hayden \"Harvard Hottie\"" + } + ] + }, + { + "title": "The Perfect Score", + "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", + "release_date": "2004-01-30", + "characters": [ + { + "actor": "Chris Evans", + "character": "Kyle" + }, + { + "actor": "Scarlett Johansson", + "character": "Francesca Curtis" + } + ] + }, + { + "title": "The Avengers", + "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. Spanning the globe, a daring recruitment effort begins!", + "release_date": "2012-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + } + ] + }, + { + "title": "Iron Man 3", + "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", + "release_date": "2013-04-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / Iron Patriot" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner (uncredited)" + } + ] + }, + { + "title": "Marvel One-Shot: The Consultant", + "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", + "release_date": "2011-09-12", + "characters": [ + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark (archive footage)" + } + ] + }, + { + "title": "Thor: The Dark World", + "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. 
Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", + "release_date": "2013-10-29", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Loki as Captain America (uncredited)" + } + ] + }, + { + "title": "Avengers: Age of Ultron", + "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", + "release_date": "2015-04-22", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + } + ] + }, + { + "title": "Captain America: The Winter Soldier", + "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", + "release_date": "2014-03-20", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + } + ] + }, + { + "title": "Thanks for Sharing", + "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", + "release_date": "2013-09-19", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Adam" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Phoebe" + } + ] + }, + { + "title": "Chef", + "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. 
Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", + "release_date": "2014-05-08", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Molly" + }, + { + "actor": "Robert Downey Jr.", + "character": "Marvin" + } + ] + }, + { + "title": "Marvel Studios: Assembling a Universe", + "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", + "release_date": "2014-03-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Himself / Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Himself / Thor" + }, + { + "actor": "Chris Evans", + "character": "Himself / Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Himself / Bruce Banner / Hulk" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Herself" + }, + { + "actor": "Clark Gregg", + "character": "Himself" + }, + { + "actor": "Samuel L. Jackson", + "character": "Himself" + }, + { + "actor": "Scarlett Johansson", + "character": "Herself" + }, + { + "actor": "Jeremy Renner", + "character": "Himself" + } + ] + }, + { + "title": "Captain America: Civil War", + "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", + "release_date": "2016-04-27", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + } + ] + }, + { + "title": "Thor: Ragnarok", + "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", + "release_date": "2017-10-25", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" + } + ] + }, + { + "title": "Avengers: Endgame", + "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. 
With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", + "release_date": "2019-04-24", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Don Cheadle", + "character": "James Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Pepper Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Avengers: Infinity War", + "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", + "release_date": "2018-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } + ] + }, + { + "title": "Captain Marvel", + "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", + "release_date": "2019-03-06", + "characters": [ + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Agent Phil Coulson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America (uncredited)" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (uncredited)" + }, + { + "actor": "Don Cheadle", + "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk (uncredited)" + } + ] + }, + { + "title": "Spider-Man: Homecoming", + "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", + "release_date": "2017-07-05", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + } + ] + }, + { + "title": "Team Thor", + "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", + "release_date": "2016-08-28", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner" + } + ] + }, + { + "title": "Black Widow", + "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", + "release_date": "2020-10-28", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + } + ] + } ] ``` diff --git a/docs/developers/operations-api/bulk-operations.md b/docs/developers/operations-api/bulk-operations.md index 4c8300b6..5caad1c2 100644 --- a/docs/developers/operations-api/bulk-operations.md +++ b/docs/developers/operations-api/bulk-operations.md @@ -1,201 +1,219 @@ # Bulk Operations ## Export Local + Exports data based on a given search operation to a local file in JSON or CSV format. -* operation _(required)_ - must always be `export_local` -* format _(required)_ - the format you wish to export the data, options are `json` & `csv` -* path _(required)_ - path local to the server to export the data -* search_operation _(required)_ - search_operation of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` -* filename _(optional)_ - the name of the file where your export will be written to (do not include extension in filename). If one is not provided it will be autogenerated based on the epoch. +- operation _(required)_ - must always be `export_local` +- format _(required)_ - the format you wish to export the data, options are `json` & `csv` +- path _(required)_ - path local to the server to export the data +- search*operation *(required)\_ - search_operation of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` +- filename _(optional)_ - the name of the file where your export will be written to (do not include extension in filename). 
If one is not provided it will be autogenerated based on the epoch. ### Body + ```json { - "operation": "export_local", - "format": "json", - "path": "/data/", - "search_operation": { - "operation": "sql", - "sql": "SELECT * FROM dev.breed" - } + "operation": "export_local", + "format": "json", + "path": "/data/", + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.breed" + } } ``` ### Response: 200 + ```json { - "message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5" + "message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5" } ``` --- ## CSV Data Load + Ingests CSV data, provided directly in the operation as an `insert`, `update` or `upsert` into the specified database table. -* operation _(required)_ - must always be `csv_data_load` -* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` -* database _(optional)_ - name of the database where you are loading your data. The default is `data` -* table _(required)_ - name of the table where you are loading your data -* data _(required)_ - csv data to import into Harper +- operation _(required)_ - must always be `csv_data_load` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data. The default is `data` +- table _(required)_ - name of the table where you are loading your data +- data _(required)_ - csv data to import into Harper ### Body + ```json { - "operation": "csv_data_load", - "database": "dev", - "action": "insert", - "table": "breed", - "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" + "operation": "csv_data_load", + "database": "dev", + "action": "insert", + "table": "breed", + "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" } ``` ### Response: 200 + ```json - { - "message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69", - "job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69" - } +{ + "message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69", + "job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69" +} ``` --- ## CSV File Load -Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table. + +Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table. _Note: The CSV file must reside on the same machine on which Harper is running. For example, the path to a CSV on your computer will produce an error if your Harper instance is a cloud instance._ -* operation _(required)_ - must always be `csv_file_load` -* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` -* database _(optional)_ - name of the database where you are loading your data. 
The default is `data` -* table _(required)_ - name of the table where you are loading your data -* file_path _(required)_ - path to the csv file on the host running Harper +- operation _(required)_ - must always be `csv_file_load` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data. The default is `data` +- table _(required)_ - name of the table where you are loading your data +- file_path _(required)_ - path to the csv file on the host running Harper ### Body + ```json { - "operation": "csv_file_load", - "action": "insert", - "database": "dev", - "table": "breed", - "file_path": "/home/user/imports/breeds.csv" + "operation": "csv_file_load", + "action": "insert", + "database": "dev", + "table": "breed", + "file_path": "/home/user/imports/breeds.csv" } ``` ### Response: 200 + ```json { - "message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e", - "job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e" + "message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e", + "job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e" } ``` --- ## CSV URL Load + Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into the specified database table. -* operation _(required)_ - must always be `csv_url_load` -* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` -* database _(optional)_ - name of the database where you are loading your data. The default is `data` -* table _(required)_ - name of the table where you are loading your data -* csv_url _(required)_ - URL to the csv +- operation _(required)_ - must always be `csv_url_load` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data. The default is `data` +- table _(required)_ - name of the table where you are loading your data +- csv_url _(required)_ - URL to the csv ### Body + ```json { - "operation": "csv_url_load", - "action": "insert", - "database": "dev", - "table": "breed", - "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" + "operation": "csv_url_load", + "action": "insert", + "database": "dev", + "table": "breed", + "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" } ``` ### Response: 200 + ```json { - "message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a", - "job_id": "332aa0a2-6833-46cd-88a6-ae375920436a" + "message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a", + "job_id": "332aa0a2-6833-46cd-88a6-ae375920436a" } ``` --- ## Export To S3 + Exports data based on a given search operation from table to AWS S3 in JSON or CSV format.
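Like the CSV loads above, this operation responds immediately with a job id and completes asynchronously, so callers typically poll for the job's outcome. Below is a minimal TypeScript sketch of that submit-and-poll pattern; the instance URL, credentials, status names, and use of the `get_job` operation from the Jobs API are illustrative assumptions, not a fixed recipe.

```typescript
// Sketch only: submit a job-based bulk operation, then poll get_job until it settles.
// The URL, credentials, and status values below are assumptions for illustration.
const url = 'http://localhost:9925';
const headers = {
  'Content-Type': 'application/json',
  Authorization: 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64'),
};

async function runJob(operation: Record<string, unknown>) {
  const res = await fetch(url, { method: 'POST', headers, body: JSON.stringify(operation) });
  const { message } = (await res.json()) as { message: string };
  const jobId = message.split(' ').pop(); // "Starting job with id <uuid>"
  for (;;) {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    const poll = await fetch(url, {
      method: 'POST',
      headers,
      body: JSON.stringify({ operation: 'get_job', id: jobId }),
    });
    const [job] = (await poll.json()) as { status: string }[];
    if (job.status !== 'IN_PROGRESS') return job; // e.g. COMPLETE or ERROR (assumed names)
  }
}

// Example usage: kick off a CSV URL load and wait for it to finish.
runJob({
  operation: 'csv_url_load',
  action: 'insert',
  database: 'dev',
  table: 'breed',
  csv_url: 'https://s3.amazonaws.com/complimentarydata/breeds.csv',
}).then((job) => console.log(job.status));
```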
-* operation _(required)_ - must always be `export_to_s3` -* format _(required)_ - the format you wish to export the data, options are `json` & `csv` -* s3 _(required)_ - details your access keys, bucket, bucket region and key for saving the data to S3 -* search_operation _(required)_ - search_operation of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` +- operation _(required)_ - must always be `export_to_s3` +- format _(required)_ - the format in which you wish to export the data; options are `json` & `csv` +- s3 _(required)_ - details your access keys, bucket, bucket region and key for saving the data to S3 +- search_operation _(required)_ - search_operation of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` ### Body + ```json { - "operation": "export_to_s3", - "format": "json", - "s3": { - "aws_access_key_id": "YOUR_KEY", - "aws_secret_access_key": "YOUR_SECRET_KEY", - "bucket": "BUCKET_NAME", - "key": "OBJECT_NAME", - "region": "BUCKET_REGION" - }, - "search_operation": { - "operation": "sql", - "sql": "SELECT * FROM dev.dog" - } + "operation": "export_to_s3", + "format": "json", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + }, + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.dog" + } } ``` ### Response: 200 + ```json { - "message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a", - "job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a" + "message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a", + "job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a" } ``` --- ## Import from S3 + This operation allows users to import CSV or JSON files from an AWS S3 bucket as an `insert`, `update` or `upsert`. -* operation _(required)_ - must always be `import_from_s3` -* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` -* database _(optional)_ - name of the database where you are loading your data. The default is `data` -* table _(required)_ - name of the table where you are loading your data -* s3 _(required)_ - object containing required AWS S3 bucket info for operation: - * aws_access_key_id - AWS access key for authenticating into your S3 bucket - * aws_secret_access_key - AWS secret for authenticating into your S3 bucket - * bucket - AWS S3 bucket to import from - * key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_ - * region - the region of the bucket +- operation _(required)_ - must always be `import_from_s3` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data.
The default is `data` +- table _(required)_ - name of the table where you are loading your data +- s3 _(required)_ - object containing required AWS S3 bucket info for operation: + - aws_access_key_id - AWS access key for authenticating into your S3 bucket + - aws_secret_access_key - AWS secret for authenticating into your S3 bucket + - bucket - AWS S3 bucket to import from + - key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_ + - region - the region of the bucket ### Body + ```json { - "operation": "import_from_s3", - "action": "insert", - "database": "dev", - "table": "dog", - "s3": { - "aws_access_key_id": "YOUR_KEY", - "aws_secret_access_key": "YOUR_SECRET_KEY", - "bucket": "BUCKET_NAME", - "key": "OBJECT_NAME", - "region": "BUCKET_REGION" - } + "operation": "import_from_s3", + "action": "insert", + "database": "dev", + "table": "dog", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + } } ``` ### Response: 200 + ```json { - "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16", - "job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16" + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16", + "job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16" } ``` @@ -207,25 +225,27 @@ Delete data before the specified timestamp on the specified database table exclu _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `delete_records_before` -* date _(required)_ - records older than this date will be deleted. Supported format looks like: `YYYY-MM-DDThh:mm:ss.sZ` -* schema _(required)_ - name of the schema where you are deleting your data -* table _(required)_ - name of the table where you are deleting your data +- operation _(required)_ - must always be `delete_records_before` +- date _(required)_ - records older than this date will be deleted. Supported format looks like: `YYYY-MM-DDThh:mm:ss.sZ` +- schema _(required)_ - name of the schema where you are deleting your data +- table _(required)_ - name of the table where you are deleting your data ### Body + ```json { - "operation": "delete_records_before", - "date": "2021-01-25T23:05:27.464", - "schema": "dev", - "table": "breed" + "operation": "delete_records_before", + "date": "2021-01-25T23:05:27.464", + "schema": "dev", + "table": "breed" } ``` ### Response: 200 + ```json { - "message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373", - "job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373" + "message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373", + "job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373" } -``` \ No newline at end of file +``` diff --git a/docs/developers/operations-api/certificate-management.md b/docs/developers/operations-api/certificate-management.md index 8bf820f2..219d0b26 100644 --- a/docs/developers/operations-api/certificate-management.md +++ b/docs/developers/operations-api/certificate-management.md @@ -2,34 +2,36 @@ ## Add Certificate -Adds or updates a certificate in the `hdb_certificate` system table. -If a `private_key` is provided it will __not__ be stored in `hdb_certificate`, it will be written to file in `/keys/`. +Adds or updates a certificate in the `hdb_certificate` system table. +If a `private_key` is provided it will **not** be stored in `hdb_certificate`, it will be written to file in `/keys/`. 
If a `private_key` is not passed the operation will search for one that matches the certificate. If one is not found an error will be returned. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `add_certificate` -* name _(required)_ - a unique name for the certificate -* certificate _(required)_ - a PEM formatted certificate string -* is_authority _(required)_ - a boolean indicating if the certificate is a certificate authority -* hosts _(optional)_ - an array of hostnames that the certificate is valid for -* private_key _(optional)_ - a PEM formatted private key string +- operation _(required)_ - must always be `add_certificate` +- name _(required)_ - a unique name for the certificate +- certificate _(required)_ - a PEM formatted certificate string +- is_authority _(required)_ - a boolean indicating if the certificate is a certificate authority +- hosts _(optional)_ - an array of hostnames that the certificate is valid for +- private_key _(optional)_ - a PEM formatted private key string ### Body + ```json { - "operation": "add_certificate", - "name": "my-cert", - "certificate": "-----BEGIN CERTIFICATE-----ZDFAay... -----END CERTIFICATE-----", - "is_authority": false, - "private_key": "-----BEGIN RSA PRIVATE KEY-----Y4dMpw5f... -----END RSA PRIVATE KEY-----" + "operation": "add_certificate", + "name": "my-cert", + "certificate": "-----BEGIN CERTIFICATE-----ZDFAay... -----END CERTIFICATE-----", + "is_authority": false, + "private_key": "-----BEGIN RSA PRIVATE KEY-----Y4dMpw5f... -----END RSA PRIVATE KEY-----" } ``` ### Response: 200 + ```json { - "message": "Successfully added certificate: my-cert" + "message": "Successfully added certificate: my-cert" } ``` @@ -41,21 +43,23 @@ Removes a certificate from the `hdb_certificate` system table and deletes the co _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `remove_certificate` -* name _(required)_ - the name of the certificate +- operation _(required)_ - must always be `remove_certificate` +- name _(required)_ - the name of the certificate ### Body + ```json { - "operation": "remove_certificate", - "name": "my-cert" + "operation": "remove_certificate", + "name": "my-cert" } ``` ### Response: 200 + ```json { - "message": "Successfully removed my-cert" + "message": "Successfully removed my-cert" } ``` @@ -67,54 +71,50 @@ Lists all certificates in the `hdb_certificate` system table. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `list_certificates` +- operation _(required)_ - must always be `list_certificates` ### Body + ```json { - "operation": "list_certificates" + "operation": "list_certificates" } ``` ### Response: 200 + ```json [ - { - "name": "HarperDB-Certificate-Authority-node1", - "certificate": "-----BEGIN CERTIFICATE-----\r\nTANBgkqhk... S34==\r\n-----END CERTIFICATE-----\r\n", - "private_key_name": "privateKey.pem", - "is_authority": true, - "details": { - "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", - "subject": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", - "serial_number": "5235345", - "valid_from": "Aug 27 15:00:00 2024 GMT", - "valid_to": "Aug 25 15:00:00 2034 GMT" - }, - "is_self_signed": true, - "uses": [ - "https", - "wss" - ] - }, - { - "name": "node1", - "certificate": "-----BEGIN CERTIFICATE-----\r\ngIEcSR1M...
5bv==\r\n-----END CERTIFICATE-----\r\n", - "private_key_name": "privateKey.pem", - "is_authority": false, - "details": { - "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", - "subject": "CN=node.1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", - "subject_alt_name": "IP Address:127.0.0.1, DNS:localhost, IP Address:0:0:0:0:0:0:0:1, DNS:node.1", - "serial_number": "5243646", - "valid_from": "Aug 27 15:00:00 2024 GMT", - "valid_to": "Aug 25 15:00:00 2034 GMT" - }, - "is_self_signed": true, - "uses": [ - "https", - "wss" - ] - } + { + "name": "HarperDB-Certificate-Authority-node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\nTANBgkqhk... S34==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": true, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "serial_number": "5235345", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": ["https", "wss"] + }, + { + "name": "node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\ngIEcSR1M... 5bv==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": false, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=node.1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject_alt_name": "IP Address:127.0.0.1, DNS:localhost, IP Address:0:0:0:0:0:0:0:1, DNS:node.1", + "serial_number": "5243646", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": ["https", "wss"] + } ] -``` \ No newline at end of file +``` diff --git a/docs/developers/operations-api/clustering-nats.md b/docs/developers/operations-api/clustering-nats.md index cf3e2ff4..8c7fa2e4 100644 --- a/docs/developers/operations-api/clustering-nats.md +++ b/docs/developers/operations-api/clustering-nats.md @@ -1,425 +1,453 @@ # Clustering using NATS ## Cluster Set Routes + Adds a route/routes to either the hub or leaf server cluster configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched. 
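Because the operation upserts, re-sending a route that already exists is harmless; it is simply reported back in the `skipped` array of the response. That makes it practical to script one shared route list across every node in a mesh. A short TypeScript sketch of that idea follows; the instance URLs, credentials, and route values are placeholders, not prescribed values.

```typescript
// Sketch: apply one shared hub route list to several instances.
// cluster_set_routes upserts, so running this repeatedly is safe.
const instances = ['https://node1.example.com:9925', 'https://node2.example.com:9925'];
const routes = [
  { host: '3.22.181.22', port: 12345 },
  { host: '3.137.184.8', port: 12345 },
];

async function setRoutesEverywhere(auth: string) {
  for (const instance of instances) {
    const res = await fetch(instance, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: auth },
      body: JSON.stringify({ operation: 'cluster_set_routes', server: 'hub', routes }),
    });
    const { set, skipped } = (await res.json()) as { set: unknown[]; skipped: unknown[] };
    console.log(`${instance}: ${set.length} set, ${skipped.length} already present`);
  }
}
```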
_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `cluster_set_routes` -* server _(required)_ - must always be `hub` or `leaf`, in most cases you should use `hub` here -* routes _(required)_ - must always be an objects array with a host and port: - * host - the host of the remote instance you are clustering to - * port - the clustering port of the remote instance you are clustering to, in most cases this is the value in `clustering.hubServer.cluster.network.port` on the remote instance `harperdb-config.yaml` +- operation _(required)_ - must always be `cluster_set_routes` +- server _(required)_ - must always be `hub` or `leaf`, in most cases you should use `hub` here +- routes _(required)_ - must always be an array of objects, each with a host and port: + - host - the host of the remote instance you are clustering to + - port - the clustering port of the remote instance you are clustering to, in most cases this is the value in `clustering.hubServer.cluster.network.port` on the remote instance `harperdb-config.yaml` ### Body + ```json { - "operation": "cluster_set_routes", - "server": "hub", - "routes": [ - { - "host": "3.22.181.22", - "port": 12345 - }, - { - "host": "3.137.184.8", - "port": 12345 - }, - { - "host": "18.223.239.195", - "port": 12345 - }, - { - "host": "18.116.24.71", - "port": 12345 - } - ] + "operation": "cluster_set_routes", + "server": "hub", + "routes": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ] } ``` ### Response: 200 + ```json { - "message": "cluster routes successfully set", - "set": [ - { - "host": "3.22.181.22", - "port": 12345 - }, - { - "host": "3.137.184.8", - "port": 12345 - }, - { - "host": "18.223.239.195", - "port": 12345 - }, - { - "host": "18.116.24.71", - "port": 12345 - } - ], - "skipped": [] + "message": "cluster routes successfully set", + "set": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] } ``` --- ## Cluster Get Routes + Gets all the hub and leaf server routes from the config file. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `cluster_get_routes` +- operation _(required)_ - must always be `cluster_get_routes` ### Body + ```json { - "operation": "cluster_get_routes" + "operation": "cluster_get_routes" } ``` ### Response: 200 + ```json { - "hub": [ - { - "host": "3.22.181.22", - "port": 12345 - }, - { - "host": "3.137.184.8", - "port": 12345 - }, - { - "host": "18.223.239.195", - "port": 12345 - }, - { - "host": "18.116.24.71", - "port": 12345 - } - ], - "leaf": [] + "hub": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "leaf": [] } ``` --- ## Cluster Delete Routes + Removes route(s) from hub and/or leaf server routes array in config file. Returns a deletion success message and arrays of deleted and skipped records.
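A common chore is pruning every route that points at a host being decommissioned. The TypeScript sketch below pairs `cluster_get_routes` with this operation to do that; the instance URL, credentials, and host value are illustrative assumptions.

```typescript
// Sketch: delete all hub routes that point at a host being retired.
async function pruneHost(instance: string, auth: string, retiredHost: string) {
  const headers = { 'Content-Type': 'application/json', Authorization: auth };
  const post = (body: object) =>
    fetch(instance, { method: 'POST', headers, body: JSON.stringify(body) }).then((r) => r.json());

  // Read the current hub routes and keep only those aimed at the retired host.
  const { hub } = (await post({ operation: 'cluster_get_routes' })) as {
    hub: { host: string; port: number }[];
  };
  const doomed = hub.filter((route) => route.host === retiredHost);
  if (doomed.length === 0) return;

  const { deleted } = await post({ operation: 'cluster_delete_routes', routes: doomed });
  console.log('deleted routes:', deleted);
}
```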
_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `cluster_delete_routes`
-* routes _required_ - Must be an array of route object(s)
+- operation _(required)_ - must always be `cluster_delete_routes`
+- routes _required_ - Must be an array of route object(s)

### Body

```json
{
- "operation": "cluster_delete_routes",
- "routes": [
- {
- "host": "18.116.24.71",
- "port": 12345
- }
- ]
+ "operation": "cluster_delete_routes",
+ "routes": [
+ {
+ "host": "18.116.24.71",
+ "port": 12345
+ }
+ ]
}
```

### Response: 200
+
```json
{
- "message": "cluster routes successfully deleted",
- "deleted": [
- {
- "host": "18.116.24.71",
- "port": 12345
- }
- ],
- "skipped": []
+ "message": "cluster routes successfully deleted",
+ "deleted": [
+ {
+ "host": "18.116.24.71",
+ "port": 12345
+ }
+ ],
+ "skipped": []
}
```

-
---

## Add Node
+
Registers an additional Harper instance with associated subscriptions. Learn more about [Harper clustering here](../clustering/README.md).

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `add_node`
-* node_name _(required)_ - the node name of the remote node
-* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
- * schema - the schema to replicate from
- * table - the table to replicate from
- * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
- * publish - a boolean which determines if transactions on the local table should be replicated on the remote table
- * start_time _(optional)_ - How far back to go to get transactions from node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format
-
+- operation _(required)_ - must always be `add_node`
+- node_name _(required)_ - the node name of the remote node
+- subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
+ - schema - the schema to replicate from
+ - table - the table to replicate from
+ - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+ - publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+ - start_time _(optional)_ - How far back to go to get transactions from node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format
+
### Body
+
```json
{
- "operation": "add_node",
- "node_name": "ec2-3-22-181-22",
- "subscriptions": [
- {
- "schema": "dev",
- "table": "dog",
- "subscribe": false,
- "publish": true,
- "start_time": "2022-09-02T20:06:35.993Z"
- }
- ]
+ "operation": "add_node",
+ "node_name": "ec2-3-22-181-22",
+ "subscriptions": [
+ {
+ "schema": "dev",
+ "table": "dog",
+ "subscribe": false,
+ "publish": true,
+ "start_time": "2022-09-02T20:06:35.993Z"
+ }
+ ]
}
```

### Response: 200
+
```json
{
- "message": "Successfully added 'ec2-3-22-181-22' to manifest"
+ "message": "Successfully added 'ec2-3-22-181-22' to manifest"
}
```

---

## Update Node
-Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/README.md).
+
+Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/README.md).

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `update_node`
-* node_name _(required)_ - the node name of the remote node you are updating
-* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
- * schema - the schema to replicate from
- * table - the table to replicate from
- * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
- * publish - a boolean which determines if transactions on the local table should be replicated on the remote table
- * start_time _(optional)_ - How far back to go to get transactions from node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format
+- operation _(required)_ - must always be `update_node`
+- node_name _(required)_ - the node name of the remote node you are updating
+- subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
+ - schema - the schema to replicate from
+ - table - the table to replicate from
+ - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+ - publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+ - start_time _(optional)_ - How far back to go to get transactions from node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format

### Body
+
```json
{
- "operation": "update_node",
- "node_name": "ec2-18-223-239-195",
- "subscriptions": [
- {
- "schema": "dev",
- "table": "dog",
- "subscribe": true,
- "publish": false,
- "start_time": "2022-09-02T20:06:35.993Z"
- }
- ]
+ "operation": "update_node",
+ "node_name": "ec2-18-223-239-195",
+ "subscriptions": [
+ {
+ "schema": "dev",
+ "table": "dog",
+ "subscribe": true,
+ "publish": false,
+ "start_time": "2022-09-02T20:06:35.993Z"
+ }
+ ]
}
```

### Response: 200
+
```json
{
- "message": "Successfully updated 'ec2-3-22-181-22'"
+ "message": "Successfully updated 'ec2-3-22-181-22'"
}
```

---

## Set Node Replication
+
A more adeptly named alias for add and update node. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. The `database` (aka `schema`) parameter is optional; it will default to `data`.

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `set_node_replication`
-* node_name _(required)_ - the node name of the remote node you are updating
-* subscriptions _(required)_ - The relationship created between nodes.
Must be an object array and `table`, `subscribe` and `publish`:
- * database *(optional)* - the database to replicate from
- * table *(required)* - the table to replicate from
- * subscribe *(required)* - a boolean which determines if transactions on the remote table should be replicated on the local table
- * publish *(required)* - a boolean which determines if transactions on the local table should be replicated on the remote table
-*
+- operation _(required)_ - must always be `set_node_replication`
+- node_name _(required)_ - the node name of the remote node you are updating
+- subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `table`, `subscribe` and `publish`:
+ - database _(optional)_ - the database to replicate from
+ - table _(required)_ - the table to replicate from
+ - subscribe _(required)_ - a boolean which determines if transactions on the remote table should be replicated on the local table
+ - publish _(required)_ - a boolean which determines if transactions on the local table should be replicated on the remote table
+
### Body
+
```json
{
- "operation": "set_node_replication",
- "node_name": "node1",
- "subscriptions": [
- {
- "table": "dog",
- "subscribe": true,
- "publish": true
- }
- ]
+ "operation": "set_node_replication",
+ "node_name": "node1",
+ "subscriptions": [
+ {
+ "table": "dog",
+ "subscribe": true,
+ "publish": true
+ }
+ ]
}
```
+
### Response: 200
+
```json
{
- "message": "Successfully updated 'ec2-3-22-181-22'"
+ "message": "Successfully updated 'ec2-3-22-181-22'"
}
```

---

## Cluster Status
+
Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [Harper clustering here](../clustering/README.md).

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `cluster_status`
+- operation _(required)_ - must always be `cluster_status`

### Body
+
```json
{
- "operation": "cluster_status"
+ "operation": "cluster_status"
}
```

### Response: 200
+
```json
{
- "node_name": "ec2-18-221-143-69",
- "is_enabled": true,
- "connections": [
- {
- "node_name": "ec2-3-22-181-22",
- "status": "open",
- "ports": {
- "clustering": 12345,
- "operations_api": 9925
- },
- "latency_ms": 13,
- "uptime": "30d 1h 18m 8s",
- "subscriptions": [
- {
- "schema": "dev",
- "table": "dog",
- "publish": true,
- "subscribe": true
- }
- ]
- }
- ]
+ "node_name": "ec2-18-221-143-69",
+ "is_enabled": true,
+ "connections": [
+ {
+ "node_name": "ec2-3-22-181-22",
+ "status": "open",
+ "ports": {
+ "clustering": 12345,
+ "operations_api": 9925
+ },
+ "latency_ms": 13,
+ "uptime": "30d 1h 18m 8s",
+ "subscriptions": [
+ {
+ "schema": "dev",
+ "table": "dog",
+ "publish": true,
+ "subscribe": true
+ }
+ ]
+ }
+ ]
}
```

-
---

## Cluster Network
+
Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with and the routes set in its config file. Learn more about [Harper clustering here](../clustering/README.md).

_Operation is restricted to super_user roles only_

-* operation _(required)_- must always be `cluster_network`
-* timeout (_optional_) - the amount of time in milliseconds to wait for a response from the network. Must be a number
-* connected_nodes (_optional_) - omit `connected_nodes` from the response. Must be a boolean.
Defaults to `false`
-* routes (_optional_) - omit `routes` from the response. Must be a boolean. Defaults to `false`
+- operation _(required)_ - must always be `cluster_network`
+- timeout (_optional_) - the amount of time in milliseconds to wait for a response from the network. Must be a number
+- connected_nodes (_optional_) - omit `connected_nodes` from the response. Must be a boolean. Defaults to `false`
+- routes (_optional_) - omit `routes` from the response. Must be a boolean. Defaults to `false`

### Body

```json
{
- "operation": "cluster_network"
+ "operation": "cluster_network"
}
```

### Response: 200
+
```json
{
- "nodes": [
- {
- "name": "local_node",
- "response_time": 4,
- "connected_nodes": ["ec2-3-142-255-78"],
- "routes": [
- {
- "host": "3.142.255.78",
- "port": 9932
- }
- ]
- },
- {
- "name": "ec2-3-142-255-78",
- "response_time": 57,
- "connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"],
- "routes": []
- }
- ]
+ "nodes": [
+ {
+ "name": "local_node",
+ "response_time": 4,
+ "connected_nodes": ["ec2-3-142-255-78"],
+ "routes": [
+ {
+ "host": "3.142.255.78",
+ "port": 9932
+ }
+ ]
+ },
+ {
+ "name": "ec2-3-142-255-78",
+ "response_time": 57,
+ "connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"],
+ "routes": []
+ }
+ ]
}
```

---

## Remove Node
+
Removes a Harper instance and associated subscriptions from the cluster. Learn more about [Harper clustering here](../clustering/README.md).

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `remove_node`
-* name _(required)_ - The name of the node you are de-registering
+- operation _(required)_ - must always be `remove_node`
+- node_name _(required)_ - the node name of the node you are de-registering

### Body
+
```json
{
- "operation": "remove_node",
- "node_name": "ec2-3-22-181-22"
+ "operation": "remove_node",
+ "node_name": "ec2-3-22-181-22"
}
```

### Response: 200
+
```json
{
- "message": "Successfully removed 'ec2-3-22-181-22' from manifest"
+ "message": "Successfully removed 'ec2-3-22-181-22' from manifest"
}
```

---

## Configure Cluster
+
Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup. Learn more about [Harper clustering here](../clustering/README.md).
_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `configure_cluster` -* connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node +- operation _(required)_ - must always be `configure_cluster` +- connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node ### Body + ```json { - "operation": "configure_cluster", - "connections": [ - { - "node_name": "ec2-3-137-184-8", - "subscriptions": [ - { - "schema": "dev", - "table": "dog", - "subscribe": true, - "publish": false - } - ] - }, - { - "node_name": "ec2-18-223-239-195", - "subscriptions": [ - { - "schema": "dev", - "table": "dog", - "subscribe": true, - "publish": true - } - ] - } - ] + "operation": "configure_cluster", + "connections": [ + { + "node_name": "ec2-3-137-184-8", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": false + } + ] + }, + { + "node_name": "ec2-18-223-239-195", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": true + } + ] + } + ] } ``` ### Response: 200 + ```json { - "message": "Cluster successfully configured." + "message": "Cluster successfully configured." } ``` @@ -431,23 +459,24 @@ Will purge messages from a stream _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `purge_stream` -* database _(required)_ - the name of the database where the streams table resides -* table _(required)_ - the name of the table that belongs to the stream -* options _(optional)_ - control how many messages get purged. Options are: - * `keep` - purge will keep this many most recent messages - * `seq` - purge all messages up to, but not including, this sequence +- operation _(required)_ - must always be `purge_stream` +- database _(required)_ - the name of the database where the streams table resides +- table _(required)_ - the name of the table that belongs to the stream +- options _(optional)_ - control how many messages get purged. Options are: + - `keep` - purge will keep this many most recent messages + - `seq` - purge all messages up to, but not including, this sequence ### Body + ```json { - "operation": "purge_stream", - "database": "dev", - "table": "dog", - "options": { - "keep": 100 - } + "operation": "purge_stream", + "database": "dev", + "table": "dog", + "options": { + "keep": 100 + } } ``` ---- \ No newline at end of file +--- diff --git a/docs/developers/operations-api/clustering.md b/docs/developers/operations-api/clustering.md index 8b79e55b..5b78e2d1 100644 --- a/docs/developers/operations-api/clustering.md +++ b/docs/developers/operations-api/clustering.md @@ -2,39 +2,38 @@ The following operations are available for configuring and managing [Harper replication](../replication/).\ - _**If you are using NATS for clustering, please see the**_ [_**NATS Clustering Operations**_](clustering-nats.md) _**documentation.**_ ## Add Node Adds a new Harper instance to the cluster. If `subscriptions` are provided, it will also create the replication relationships between the nodes. If they are not provided a fully replicating system will be created. [Learn more about adding nodes here](../replication/). -_Operation is restricted to super\_user roles only_ - -* operation _(required)_ - must always be `add_node` -* hostname or url _(required)_ - one of these fields is required. 
You must provide either the `hostname` or the `url` of the node you want to add
-* verify\_tls _(optional)_ - a boolean which determines if the TLS certificate should be verified. This will allow the Harper default self-signed certificates to be accepted. Defaults to `true`
-* authorization _(optional)_ - an object or a string which contains the authorization information for the node being added. If it is an object, it should contain `username` and `password` fields. If it is a string, it should use HTTP `Authorization` style credentials
-* retain_authorization _(optional)_ - a boolean which determines if the authorization credentials should be retained/stored and used everytime a connection is made to this node. If `true`, the authorization will be stored on the node record. Generally this should not be used, as mTLS/certificate based authorization is much more secure and safe, and avoids the need for storing credentials. Defaults to `false`.
-* revoked_certificates _(optional)_ - an array of revoked certificates serial numbers. If a certificate is revoked, it will not be accepted for any connections.
-* shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding.
-* subscriptions _(optional)_ - The relationship created between nodes. If not provided a fully replicated cluster will be setup. Must be an object array and include `database`, `table`, `subscribe` and `publish`:
- * database - the database to replicate
- * table - the table to replicate
- * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
- * publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `add_node`
+- hostname or url _(required)_ - one of these fields is required. You must provide either the `hostname` or the `url` of the node you want to add
+- verify_tls _(optional)_ - a boolean which determines if the TLS certificate should be verified. This will allow the Harper default self-signed certificates to be accepted. Defaults to `true`
+- authorization _(optional)_ - an object or a string which contains the authorization information for the node being added. If it is an object, it should contain `username` and `password` fields. If it is a string, it should use HTTP `Authorization` style credentials
+- retain_authorization _(optional)_ - a boolean which determines if the authorization credentials should be retained/stored and used every time a connection is made to this node. If `true`, the authorization will be stored on the node record. Generally this should not be used, as mTLS/certificate based authorization is much more secure and safe, and avoids the need for storing credentials. Defaults to `false`.
+- revoked_certificates _(optional)_ - an array of revoked certificate serial numbers. If a certificate is revoked, it will not be accepted for any connections.
+- shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding.
+- subscriptions _(optional)_ - The relationship created between nodes. If not provided a fully replicated cluster will be set up.
Must be an object array and include `database`, `table`, `subscribe` and `publish`:
+ - database - the database to replicate
+ - table - the table to replicate
+ - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+ - publish - a boolean which determines if transactions on the local table should be replicated on the remote table

### Body

```json
{
- "operation": "add_node",
- "hostname": "server-two",
- "verify_tls": false,
- "authorization": {
- "username": "admin",
- "password": "password"
- }
+ "operation": "add_node",
+ "hostname": "server-two",
+ "verify_tls": false,
+ "authorization": {
+ "username": "admin",
+ "password": "password"
+ }
}
```

@@ -42,44 +41,44 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Successfully added 'server-two' to cluster"
+ "message": "Successfully added 'server-two' to cluster"
}
```

-***
+---

## Update Node

Modifies an existing Harper instance in the cluster.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

_Note: will attempt to add the node if it does not exist_

-* operation _(required)_ - must always be `update_node`
-* hostname _(required)_ - the `hostname` of the remote node you are updating
-* revoked_certificates _(optional)_ - an array of revoked certificates serial numbers. If a certificate is revoked, it will not be accepted for any connections.
-* shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding.
-* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `database`, `table`, `subscribe` and `publish`:
- * database - the database to replicate from
- * table - the table to replicate from
- * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
- * publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+- operation _(required)_ - must always be `update_node`
+- hostname _(required)_ - the `hostname` of the remote node you are updating
+- revoked_certificates _(optional)_ - an array of revoked certificate serial numbers. If a certificate is revoked, it will not be accepted for any connections.
+- shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding.
+- subscriptions _(required)_ - The relationship created between nodes.
Must be an object array and include `database`, `table`, `subscribe` and `publish`:
+ - database - the database to replicate from
+ - table - the table to replicate from
+ - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+ - publish - a boolean which determines if transactions on the local table should be replicated on the remote table

### Body

```json
{
- "operation": "update_node",
- "hostname": "server-two",
- "subscriptions": [
- {
- "database": "dev",
- "table": "my-table",
- "subscribe": true,
- "publish": true
- }
- ]
+ "operation": "update_node",
+ "hostname": "server-two",
+ "subscriptions": [
+ {
+ "database": "dev",
+ "table": "my-table",
+ "subscribe": true,
+ "publish": true
+ }
+ ]
}
```

@@ -87,27 +86,27 @@ _Note: will attempt to add the node if it does not exist_

```json
{
- "message": "Successfully updated 'server-two'"
+ "message": "Successfully updated 'server-two'"
}
```

-***
+---

## Remove Node

Removes a Harper node from the cluster and stops replication. [Learn more about remove node here](../replication/).

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `remove_node`
-* name _(required)_ - The name of the node you are removing
+- operation _(required)_ - must always be `remove_node`
+- hostname _(required)_ - the `hostname` of the node you are removing

### Body

```json
{
- "operation": "remove_node",
- "hostname": "server-two"
+ "operation": "remove_node",
+ "hostname": "server-two"
}
```

@@ -115,11 +114,11 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Successfully removed 'server-two' from cluster"
+ "message": "Successfully removed 'server-two' from cluster"
}
```

-***
+---

## Cluster Status

Returns an array of status objects from a cluster.

`database_sockets` shows the actual websocket connections that exist between nodes.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `cluster_status`
+- operation _(required)_ - must always be `cluster_status`

### Body

```json
{
- "operation": "cluster_status"
+ "operation": "cluster_status"
}
```

@@ -171,62 +170,64 @@ _Operation is restricted to super\_user roles only_
"is_enabled": true
}
```
+
There is a separate socket for each database for each node. Each node is represented in the connections array, and each database connection to that node is represented in the `database_sockets` array.

Additional timing statistics include:

-* `lastCommitConfirmed`: When a commit is sent out, it should receive a confirmation from the remote server; this is the last receipt of confirmation of an outgoing commit.
-* `lastReceivedRemoteTime`: This is the timestamp of the transaction that was last received. The timestamp is from when the original transaction occurred.
-* `lastReceivedLocalTime`: This is local time when the last transaction was received. If there is a different between this and `lastReceivedRemoteTime`, it means there is a delay from the original transaction to * receiving it and so it is probably catching-up/behind.
-* `sendingMessage`: The timestamp of transaction is actively being sent. This won't exist if the replicator is waiting for the next transaction to send.
-***
+- `lastCommitConfirmed`: When a commit is sent out, it should receive a confirmation from the remote server; this is the last receipt of confirmation of an outgoing commit.
+- `lastReceivedRemoteTime`: This is the timestamp of the transaction that was last received. The timestamp is from when the original transaction occurred.
+- `lastReceivedLocalTime`: This is the local time when the last transaction was received. If there is a difference between this and `lastReceivedRemoteTime`, it means there is a delay from the original transaction to receiving it and so it is probably catching-up/behind.
+- `sendingMessage`: The timestamp of the transaction that is actively being sent. This won't exist if the replicator is waiting for the next transaction to send.
+
+---

## Configure Cluster

Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `configure_cluster`
-* connections _(required)_ - must be an object array with each object following the `add_node` schema.
+- operation _(required)_ - must always be `configure_cluster`
+- connections _(required)_ - must be an object array with each object following the `add_node` schema.

### Body

```json
{
- "operation": "configure_cluster",
- "connections": [
- {
- "hostname": "server-two",
- "verify_tls": false,
- "authorization": {
- "username": "admin",
- "password": "password2"
- },
- "subscriptions": [
- {
- "schema": "dev",
- "table": "my-table",
- "subscribe": true,
- "publish": false
- }
- ]
- },
- {
- "hostname": "server-three",
- "verify_tls": false,
- "authorization": {
- "username": "admin",
- "password": "password3"
- },
- "subscriptions": [
- {
- "schema": "dev",
- "table": "dog",
- "subscribe": true,
- "publish": true
- }
- ]
- }
- ]
+ "operation": "configure_cluster",
+ "connections": [
+ {
+ "hostname": "server-two",
+ "verify_tls": false,
+ "authorization": {
+ "username": "admin",
+ "password": "password2"
+ },
+ "subscriptions": [
+ {
+ "schema": "dev",
+ "table": "my-table",
+ "subscribe": true,
+ "publish": false
+ }
+ ]
+ },
+ {
+ "hostname": "server-three",
+ "verify_tls": false,
+ "authorization": {
+ "username": "admin",
+ "password": "password3"
+ },
+ "subscriptions": [
+ {
+ "schema": "dev",
+ "table": "dog",
+ "subscribe": true,
+ "publish": true
+ }
+ ]
+ }
+ ]
}
```

@@ -234,33 +235,33 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Cluster successfully configured."
+ "message": "Cluster successfully configured."
}
```

-***
+---

## Cluster Set Routes

Adds a route/routes to the `replication.routes` configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `cluster_set_routes`
-* routes _(required)_ - the routes field is an array that specifies the routes for clustering. Each element in the array can be either a string or an object with `hostname` and `port` properties.
+- operation _(required)_ - must always be `cluster_set_routes`
+- routes _(required)_ - the routes field is an array that specifies the routes for clustering. Each element in the array can be either a string or an object with `hostname` and `port` properties.
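As a minimal sketch of the string form, the following body would register a single route by URL (the hostname and port are illustrative placeholders, not values from an actual cluster):

```json
{
	"operation": "cluster_set_routes",
	"routes": ["wss://server-four:9930"]
}
```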
### Body ```json { - "operation": "cluster_set_routes", - "routes": [ - "wss://server-two:9925", - { - "hostname": "server-three", - "port": 9930 - } - ] + "operation": "cluster_set_routes", + "routes": [ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } + ] } ``` @@ -268,33 +269,33 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "cluster routes successfully set", - "set": [ - "wss://server-two:9925", - { - "hostname": "server-three", - "port": 9930 - } - ], - "skipped": [] + "message": "cluster routes successfully set", + "set": [ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } + ], + "skipped": [] } ``` -*** +--- ## Cluster Get Routes Gets the replication routes from the Harper config file. -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `cluster_get_routes` +- operation _(required)_ - must always be `cluster_get_routes` ### Body ```json { - "operation": "cluster_get_routes" + "operation": "cluster_get_routes" } ``` @@ -302,36 +303,36 @@ _Operation is restricted to super\_user roles only_ ```json [ - "wss://server-two:9925", - { - "hostname": "server-three", - "port": 9930 - } + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } ] ``` -*** +--- ## Cluster Delete Routes Removes route(s) from the Harper config file. Returns a deletion success message and arrays of deleted and skipped records. -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `cluster_delete_routes` -* routes _required_ - Must be an array of route object(s) +- operation _(required)_ - must always be `cluster_delete_routes` +- routes _required_ - Must be an array of route object(s) ### Body ```json { - "operation": "cluster_delete_routes", - "routes": [ - { - "hostname": "server-three", - "port": 9930 - } - ] + "operation": "cluster_delete_routes", + "routes": [ + { + "hostname": "server-three", + "port": 9930 + } + ] } ``` @@ -339,13 +340,13 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "cluster routes successfully deleted", - "deleted": [ - { - "hostname": "server-three", - "port": 9930 - } - ], - "skipped": [] + "message": "cluster routes successfully deleted", + "deleted": [ + { + "hostname": "server-three", + "port": 9930 + } + ], + "skipped": [] } ``` diff --git a/docs/developers/operations-api/components.md b/docs/developers/operations-api/components.md index 898ddb61..3839d7c8 100644 --- a/docs/developers/operations-api/components.md +++ b/docs/developers/operations-api/components.md @@ -4,18 +4,18 @@ Creates a new component project in the component root directory using a predefined template. -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `add_component` -* project _(required)_ - the name of the project you wish to create -* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. +- operation _(required)_ - must always be `add_component` +- project _(required)_ - the name of the project you wish to create +- replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. 
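As a hedged sketch of the optional flag in use, a body that creates the project and also replicates it to every node in the cluster might look like this (the project name is a placeholder):

```json
{
	"operation": "add_component",
	"project": "my-replicated-component",
	"replicated": true
}
```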
### Body ```json { - "operation": "add_component", - "project": "my-component" + "operation": "add_component", + "project": "my-component" } ``` @@ -23,11 +23,11 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "Successfully added project: my-component" + "message": "Successfully added project: my-component" } ``` -*** +--- ## Deploy Component @@ -65,36 +65,36 @@ Or you can use a GitLab Project Access Token: https://my-project:@gitlab.com/my-group/my-project#semver:v1.0.0 ``` -Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node\_modules in the hdb root directory (typically `~/hdb/node_modules`). +Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node_modules in the hdb root directory (typically `~/hdb/node_modules`). _Note: After deploying a component a restart may be required_ -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `deploy_component` -* project _(required)_ - the name of the project you wish to deploy -* package _(optional)_ - this can be any valid GitHub or NPM reference -* payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string -* restart _(optional)_ - must be either a boolean or the string `rolling`. If set to `rolling`, a rolling restart will be triggered after the component is deployed, meaning that each node in the cluster will be sequentially restarted (waiting for the last restart to start the next). If set to `true`, the restart will not be rolling, all nodes will be restarted in parallel. If `replicated` is `true`, the restart operations will be replicated across the cluster. -* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. -* install\_command _(optional)_ - A command to use when installing the component. Must be a string. This can be used to install dependencies with pnpm or yarn, for example, like: `"install_command": "npm install -g pnpm && pnpm install"` +- operation _(required)_ - must always be `deploy_component` +- project _(required)_ - the name of the project you wish to deploy +- package _(optional)_ - this can be any valid GitHub or NPM reference +- payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string +- restart _(optional)_ - must be either a boolean or the string `rolling`. If set to `rolling`, a rolling restart will be triggered after the component is deployed, meaning that each node in the cluster will be sequentially restarted (waiting for the last restart to start the next). 
If set to `true`, the restart will not be rolling; all nodes will be restarted in parallel. If `replicated` is `true`, the restart operations will be replicated across the cluster.
+- replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean.
+- install_command _(optional)_ - A command to use when installing the component. Must be a string. This can be used to install dependencies with pnpm or yarn, for example, like: `"install_command": "npm install -g pnpm && pnpm install"`

### Body

```json
{
- "operation": "deploy_component",
- "project": "my-component",
- "payload": "A very large base64-encoded string representation of the .tar file"
+ "operation": "deploy_component",
+ "project": "my-component",
+ "payload": "A very large base64-encoded string representation of the .tar file"
}
```

```json
{
- "operation": "deploy_component",
- "project": "my-component",
- "package": "HarperDB/application-template",
- "replicated": true
+ "operation": "deploy_component",
+ "project": "my-component",
+ "package": "HarperDB/application-template",
+ "replicated": true
}
```

@@ -102,29 +102,29 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Successfully deployed: my-component"
+ "message": "Successfully deployed: my-component"
}
```

-***
+---

## Package Component

Creates a temporary `.tar` file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name and the payload.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `package_component`
-* project _(required)_ - the name of the project you wish to package
-* skip\_node\_modules _(optional)_ - if true, creates option for tar module that will exclude the project's node\_modules directory. Must be a boolean
+- operation _(required)_ - must always be `package_component`
+- project _(required)_ - the name of the project you wish to package
+- skip_node_modules _(optional)_ - if true, sets an option for the tar module that will exclude the project's node_modules directory. Must be a boolean

### Body

```json
{
- "operation": "package_component",
- "project": "my-component",
- "skip_node_modules": true
+ "operation": "package_component",
+ "project": "my-component",
+ "skip_node_modules": true
}
```

@@ -132,12 +132,12 @@ _Operation is restricted to super\_user roles only_

```json
{
- "project": "my-component",
- "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
+ "project": "my-component",
+ "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
}
```

-***
+---

## Drop Component

Deletes a file from inside the component project or deletes the complete project.

**If just `project` is provided it will delete all that project's local files and folders**

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `drop_component`
-* project _(required)_ - the name of the project you wish to delete or to delete from if using the `file` parameter
-* file _(optional)_ - the path relative to your project folder of the file you wish to delete
-* replicated _(optional)_ - if true, Harper will replicate the component deletion to all nodes in the cluster. Must be a boolean.
-* restart _(optional)_ - if true, Harper will restart after dropping the component. Must be a boolean.
+- operation _(required)_ - must always be `drop_component` +- project _(required)_ - the name of the project you wish to delete or to delete from if using the `file` parameter +- file _(optional)_ - the path relative to your project folder of the file you wish to delete +- replicated _(optional)_ - if true, Harper will replicate the component deletion to all nodes in the cluster. Must be a boolean. +- restart _(optional)_ - if true, Harper will restart after dropping the component. Must be a boolean. ### Body ```json { - "operation": "drop_component", - "project": "my-component", - "file": "utils/myUtils.js" + "operation": "drop_component", + "project": "my-component", + "file": "utils/myUtils.js" } ``` @@ -167,25 +167,25 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "Successfully dropped: my-component/utils/myUtils.js" + "message": "Successfully dropped: my-component/utils/myUtils.js" } ``` -*** +--- ## Get Components Gets all local component files and folders and any component config from `harperdb-config.yaml` -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_components` +- operation _(required)_ - must always be `get_components` ### Body ```json { - "operation": "get_components" + "operation": "get_components" } ``` @@ -193,85 +193,85 @@ _Operation is restricted to super\_user roles only_ ```json { - "name": "components", - "entries": [ - { - "package": "HarperDB/application-template", - "name": "deploy-test-gh" - }, - { - "package": "@fastify/compress", - "name": "fast-compress" - }, - { - "name": "my-component", - "entries": [ - { - "name": "LICENSE", - "mtime": "2023-08-22T16:00:40.286Z", - "size": 1070 - }, - { - "name": "README.md", - "mtime": "2023-08-22T16:00:40.287Z", - "size": 1207 - }, - { - "name": "config.yaml", - "mtime": "2023-08-22T16:00:40.287Z", - "size": 1069 - }, - { - "name": "package.json", - "mtime": "2023-08-22T16:00:40.288Z", - "size": 145 - }, - { - "name": "resources.js", - "mtime": "2023-08-22T16:00:40.289Z", - "size": 583 - }, - { - "name": "schema.graphql", - "mtime": "2023-08-22T16:00:40.289Z", - "size": 466 - }, - { - "name": "utils", - "entries": [ - { - "name": "commonUtils.js", - "mtime": "2023-08-22T16:00:40.289Z", - "size": 583 - } - ] - } - ] - } - ] + "name": "components", + "entries": [ + { + "package": "HarperDB/application-template", + "name": "deploy-test-gh" + }, + { + "package": "@fastify/compress", + "name": "fast-compress" + }, + { + "name": "my-component", + "entries": [ + { + "name": "LICENSE", + "mtime": "2023-08-22T16:00:40.286Z", + "size": 1070 + }, + { + "name": "README.md", + "mtime": "2023-08-22T16:00:40.287Z", + "size": 1207 + }, + { + "name": "config.yaml", + "mtime": "2023-08-22T16:00:40.287Z", + "size": 1069 + }, + { + "name": "package.json", + "mtime": "2023-08-22T16:00:40.288Z", + "size": 145 + }, + { + "name": "resources.js", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 583 + }, + { + "name": "schema.graphql", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 466 + }, + { + "name": "utils", + "entries": [ + { + "name": "commonUtils.js", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 583 + } + ] + } + ] + } + ] } ``` -*** +--- ## Get Component File Gets the contents of a file inside a component project. 
-_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_component_file` -* project _(required)_ - the name of the project where the file is located -* file _(required)_ - the path relative to your project folder of the file you wish to view -* encoding _(optional)_ - the encoding that will be passed to the read file call. Defaults to `utf8` +- operation _(required)_ - must always be `get_component_file` +- project _(required)_ - the name of the project where the file is located +- file _(required)_ - the path relative to your project folder of the file you wish to view +- encoding _(optional)_ - the encoding that will be passed to the read file call. Defaults to `utf8` ### Body ```json { - "operation": "get_component_file", - "project": "my-component", - "file": "resources.js" + "operation": "get_component_file", + "project": "my-component", + "file": "resources.js" } ``` @@ -279,33 +279,33 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}" + "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}" } ``` -*** +--- ## Set Component File Creates or updates a file inside a component project. -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `set_component_file` -* project _(required)_ - the name of the project the file is located in -* file _(required)_ - the path relative to your project folder of the file you wish to set -* payload _(required)_ - what will be written to the file -* encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8` -* replicated _(optional)_ - if true, Harper will replicate the component update to all nodes in the cluster. Must be a boolean. +- operation _(required)_ - must always be `set_component_file` +- project _(required)_ - the name of the project the file is located in +- file _(required)_ - the path relative to your project folder of the file you wish to set +- payload _(required)_ - what will be written to the file +- encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8` +- replicated _(optional)_ - if true, Harper will replicate the component update to all nodes in the cluster. Must be a boolean. 
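As a hedged illustration of the optional parameters described above, a body that writes a small file with an explicit encoding and replicates the change across the cluster could look like the following (the file path and payload are placeholders):

```json
{
	"operation": "set_component_file",
	"project": "my-component",
	"file": "utils/example.js",
	"payload": "console.log('example');",
	"encoding": "utf8",
	"replicated": true
}
```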
### Body

```json
{
- "operation": "set_component_file",
- "project": "my-component",
- "file": "test.js",
- "payload": "console.log('hello world')"
+ "operation": "set_component_file",
+ "project": "my-component",
+ "file": "test.js",
+ "payload": "console.log('hello world')"
}
```

### Response: 200

```json
{
- "message": "Successfully set component: test.js"
+ "message": "Successfully set component: test.js"
}
```

-***
+---

## Add SSH Key

@@ -330,27 +330,27 @@ _Operation is restricted to super_user roles only_

- operation _(required)_ - must always be `add_ssh_key`
- name _(required)_ - the name of the key
- key _(required)_ - the private key contents. Must be an ed25519 key. Line breaks must be delimited with `\n` and have a trailing `\n`
- host _(required)_ - the host for the ssh config (see below). Used as part of the `package` url when deploying a component using this key
- hostname _(required)_ - the hostname for the ssh config (see below). Used to map `host` to an actual domain (e.g. `github.com`)
-- known_hosts _(optional)_ - the public SSH keys of the host your component will be retrieved from. If `hostname` is `github.com` this will be retrieved automatically. Line breaks must be delimited with `\n`
+- known_hosts _(optional)_ - the public SSH keys of the host your component will be retrieved from. If `hostname` is `github.com` this will be retrieved automatically. Line breaks must be delimited with `\n`
- replicated _(optional)_ - if true, HarperDB will replicate the key to all nodes in the cluster. Must be a boolean.

-_Operation is restricted to super\_user roles only_
-
-* operation _(required)_ - must always be `add_ssh_key`
-* name _(required)_ - the name of the key
-* key _(required)_ - the private key contents. Line breaks must be delimited with
-* host _(required)_ - the host for the ssh config (see below). Used as part of the `package` url when deploying a component using this key
-* hostname _(required)_ - the hostname for the ssh config (see below). Used to map `host` to an actual domain (e.g. `github.com`)
-* known\_hosts _(optional)_ - the public SSH keys of the host your component will be retrieved from. If `hostname` is `github.com` this will be retrieved automatically. Line breaks must be delimited with
-* replicated _(optional)_ - if true, Harper will replicate the key to all nodes in the cluster. Must be a boolean.

### Body

```json
{
- "operation": "add_ssh_key",
- "name": "harperdb-private-component",
- "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nfake\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
- "host": "harperdb-private-component.github.com",
- "hostname": "github.com"
+ "operation": "add_ssh_key",
+ "name": "harperdb-private-component",
+ "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nfake\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
+ "host": "harperdb-private-component.github.com",
+ "hostname": "github.com"
}
```

@@ -358,7 +358,7 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Added ssh key: harperdb-private-component"
+ "message": "Added ssh key: harperdb-private-component"
}
```

@@ -381,13 +381,13 @@ Host harperdb-private-component.github.com

Note that `deploy_component` with a package uses `npm install` so the url must be a valid npm format url.
The above is an example of a url using a tag in the repo to install.

-***
+---

## Update SSH Key

Updates the private key contents of an existing SSH key.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

- operation _(required)_ - must always be `update_ssh_key`
- name _(required)_ - the name of the key to be updated

@@ -398,11 +398,11 @@

```json
{
- "operation": "update_ssh_key",
- "name": "harperdb-private-component",
- "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nNEWFAKE\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
- "host": "harperdb-private-component.github.com",
- "hostname": "github.com"
+ "operation": "update_ssh_key",
+ "name": "harperdb-private-component",
+ "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nNEWFAKE\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
+ "host": "harperdb-private-component.github.com",
+ "hostname": "github.com"
}
```

@@ -410,7 +410,7 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Updated ssh key: harperdb-private-component"
+ "message": "Updated ssh key: harperdb-private-component"
}
```

@@ -418,17 +418,17 @@

Deletes a SSH key. This will also remove it from the generated SSH config.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `delete_ssh_key`
-* name _(required)_ - the name of the key to be deleted
-* replicated _(optional)_ - if true, Harper will replicate the key deletion to all nodes in the cluster. Must be a boolean.
+- operation _(required)_ - must always be `delete_ssh_key`
+- name _(required)_ - the name of the key to be deleted
+- replicated _(optional)_ - if true, Harper will replicate the key deletion to all nodes in the cluster. Must be a boolean.

### Body

```json
{
- "name": "harperdb-private-component"
+ "operation": "delete_ssh_key",
+ "name": "harperdb-private-component"
}
```

@@ -436,25 +436,25 @@ _Operation is restricted to super\_user roles only_

```json
{
- "message": "Deleted ssh key: harperdb-private-component"
+ "message": "Deleted ssh key: harperdb-private-component"
}
```

-***
+---

## List SSH Keys

Lists the names of added SSH keys.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `list_ssh_keys`
+- operation _(required)_ - must always be `list_ssh_keys`

### Body

```json
{
- "operation": "list_ssh_keys"
+ "operation": "list_ssh_keys"
}
```

@@ -469,24 +469,24 @@

]
```

-***
+---

## Set SSH Known Hosts

-Sets the SSH known\_hosts file. This will overwrite the file.
+Sets the SSH known_hosts file. This will overwrite the file.

-_Operation is restricted to super\_user roles only_
+_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `set_ssh_known_hosts`
-* known\_hosts _(required)_ - The contents to set the known\_hosts to. Line breaks must be delimite d with
-* replicated _(optional)_ - if true, Harper will replicate the known hosts to all nodes in the cluster. Must be a boolean.
+- operation _(required)_ - must always be `set_ssh_known_hosts`
+- known_hosts _(required)_ - The contents to set the known_hosts to. Line breaks must be delimited with `\n`
+- replicated _(optional)_ - if true, Harper will replicate the known hosts to all nodes in the cluster. Must be a boolean.
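For instance, a sketch of a body that sets a single entry and replicates it to every node might look like this (the key below is the `ssh-ed25519` entry already shown in this document, truncated to one host):

```json
{
	"operation": "set_ssh_known_hosts",
	"known_hosts": "github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\n",
	"replicated": true
}
```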
### Body ```json { - "operation": "set_ssh_known_hosts", - "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n" + "operation": "set_ssh_known_hosts", + "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n" } ``` @@ -494,23 +494,23 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "Known hosts successfully set" + "message": "Known hosts successfully set" } ``` ## Get SSH Known Hosts -Gets the contents of the known\_hosts file +Gets the contents of the known_hosts file -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_ssh_known_hosts` +- operation _(required)_ - must always be `get_ssh_known_hosts` ### Body ```json { - "operation": "get_ssh_known_hosts" + "operation": "get_ssh_known_hosts" } ``` @@ -518,30 +518,29 @@ _Operation is restricted to super\_user roles only_ ```json { - "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n" + "known_hosts": "github.com ecdsa-sha2-nistp256 
AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
}
```

-***
+---

## Install Node Modules
+
This operation is deprecated, as it is handled automatically by deploy_component and restart. Executes npm install against specified custom function projects.

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `install_node_modules`
-* projects _(required)_ - must ba an array of custom functions projects.
-* dry_run _(optional)_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false.
+- operation _(required)_ - must always be `install_node_modules`
+- projects _(required)_ - must be an array of custom function projects.
+- dry_run _(optional)_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false.

### Body
+
```json
{
- "operation": "install_node_modules",
- "projects": [
- "dogs",
- "cats"
- ],
- "dry_run": true
+ "operation": "install_node_modules",
+ "projects": ["dogs", "cats"],
+ "dry_run": true
}
-``` \ No newline at end of file
+```
diff --git a/docs/developers/operations-api/configuration.md b/docs/developers/operations-api/configuration.md
index bbdbfaa6..f4f2018f 100644
--- a/docs/developers/operations-api/configuration.md
+++ b/docs/developers/operations-api/configuration.md
@@ -6,125 +6,126 @@ Modifies the Harper configuration file parameters. Must follow with a restart or

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `set_configuration`
-* logging_level _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
-* clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+- operation _(required)_ - must always be `set_configuration`
+- logging_level _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+- clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file

### Body
+
```json
{
- "operation": "set_configuration",
- "logging_level": "trace",
- "clustering_enabled": true
+ "operation": "set_configuration",
+ "logging_level": "trace",
+ "clustering_enabled": true
}
```

### Response: 200
+
```json
{
- "message": "Configuration successfully set. You must restart HarperDB for new config settings to take effect."
} ``` --- ## Get Configuration + Returns the Harper configuration parameters. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_configuration` +- operation _(required)_ - must always be `get_configuration` ### Body + ```json { - "operation": "get_configuration" + "operation": "get_configuration" } ``` ### Response: 200 + ```json { - "http": { - "compressionThreshold": 1200, - "cors": false, - "corsAccessList": [ - null - ], - "keepAliveTimeout": 30000, - "port": 9926, - "securePort": null, - "timeout": 120000 - }, - "threads": 11, - "authentication": { - "cacheTTL": 30000, - "enableSessions": true, - "operationTokenTimeout": "1d", - "refreshTokenTimeout": "30d" - }, - "analytics": { - "aggregatePeriod": 60 - }, - "replication": { - "hostname": "node1", - "databases": "*", - "routes": null, - "url": "wss://127.0.0.1:9925" - }, - "componentsRoot": "/Users/hdb/components", - "localStudio": { - "enabled": false - }, - "logging": { - "auditAuthEvents": { - "logFailed": false, - "logSuccessful": false - }, - "auditLog": true, - "auditRetention": "3d", - "file": true, - "level": "error", - "root": "/Users/hdb/log", - "rotation": { - "enabled": false, - "compress": false, - "interval": null, - "maxSize": null, - "path": "/Users/hdb/log" - }, - "stdStreams": false - }, - "mqtt": { - "network": { - "port": 1883, - "securePort": 8883 - }, - "webSocket": true, - "requireAuthentication": true - }, - "operationsApi": { - "network": { - "cors": true, - "corsAccessList": [ - "*" - ], - "domainSocket": "/Users/hdb/operations-server", - "port": 9925, - "securePort": null - } - }, - "rootPath": "/Users/hdb", - "storage": { - "writeAsync": false, - "caching": true, - "compression": false, - "noReadAhead": true, - "path": "/Users/hdb/database", - "prefetchWrites": true - }, - "tls": { - "privateKey": "/Users/hdb/keys/privateKey.pem" - } + "http": { + "compressionThreshold": 1200, + "cors": false, + "corsAccessList": [null], + "keepAliveTimeout": 30000, + "port": 9926, + "securePort": null, + "timeout": 120000 + }, + "threads": 11, + "authentication": { + "cacheTTL": 30000, + "enableSessions": true, + "operationTokenTimeout": "1d", + "refreshTokenTimeout": "30d" + }, + "analytics": { + "aggregatePeriod": 60 + }, + "replication": { + "hostname": "node1", + "databases": "*", + "routes": null, + "url": "wss://127.0.0.1:9925" + }, + "componentsRoot": "/Users/hdb/components", + "localStudio": { + "enabled": false + }, + "logging": { + "auditAuthEvents": { + "logFailed": false, + "logSuccessful": false + }, + "auditLog": true, + "auditRetention": "3d", + "file": true, + "level": "error", + "root": "/Users/hdb/log", + "rotation": { + "enabled": false, + "compress": false, + "interval": null, + "maxSize": null, + "path": "/Users/hdb/log" + }, + "stdStreams": false + }, + "mqtt": { + "network": { + "port": 1883, + "securePort": 8883 + }, + "webSocket": true, + "requireAuthentication": true + }, + "operationsApi": { + "network": { + "cors": true, + "corsAccessList": ["*"], + "domainSocket": "/Users/hdb/operations-server", + "port": 9925, + "securePort": null + } + }, + "rootPath": "/Users/hdb", + "storage": { + "writeAsync": false, + "caching": true, + "compression": false, + "noReadAhead": true, + "path": "/Users/hdb/database", + "prefetchWrites": true + }, + "tls": { + "privateKey": "/Users/hdb/keys/privateKey.pem" + } } -``` \ No newline at end of file +``` diff --git a/docs/developers/operations-api/custom-functions.md 
b/docs/developers/operations-api/custom-functions.md index a0115773..7ebffc6e 100644 --- a/docs/developers/operations-api/custom-functions.md +++ b/docs/developers/operations-api/custom-functions.md @@ -1,6 +1,6 @@ # Custom Functions -*These operations are deprecated.* +_These operations are deprecated._ ## Custom Functions Status @@ -8,21 +8,23 @@ Returns the state of the Custom Functions server. This includes whether it is en _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `custom_function_status` +- operation _(required)_ - must always be `custom_functions_status` ### Body + ```json { - "operation": "custom_functions_status" + "operation": "custom_functions_status" } ``` ### Response: 200 + ```json { - "is_enabled": true, - "port": 9926, - "directory": "/Users/myuser/hdb/custom_functions" + "is_enabled": true, + "port": 9926, + "directory": "/Users/myuser/hdb/custom_functions" } ``` @@ -34,13 +36,13 @@ Returns an array of projects within the Custom Functions root project directory. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_custom_functions` +- operation _(required)_ - must always be `get_custom_functions` ### Body ```json { - "operation": "get_custom_functions" + "operation": "get_custom_functions" } ``` @@ -48,11 +50,11 @@ _Operation is restricted to super_user roles only_ ```json { - "dogs": { - "routes": ["examples"], - "helpers":["example"], - "static":3 - } + "dogs": { + "routes": ["examples"], + "helpers": ["example"], + "static": 3 + } } ``` @@ -64,19 +66,19 @@ Returns the content of the specified file as text. Harper Studio uses this call _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_custom_function` -* project _(required)_ - the name of the project containing the file for which you wish to get content -* type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers -* file _(required)_ - The name of the file for which you wish to get content - should not include the file extension (which is always .js) +- operation _(required)_ - must always be `get_custom_function` +- project _(required)_ - the name of the project containing the file for which you wish to get content +- type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers +- file _(required)_ - the name of the file for which you wish to get content - should not include the file extension (which is always .js) ### Body ```json { - "operation": "get_custom_function", - "project": "dogs", - "type": "helpers", - "file": "example" + "operation": "get_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example" } ``` @@ -84,7 +86,7 @@ _Operation is restricted to super_user roles only_ ```json { - "message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method:
'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" + "message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" } ``` @@ -96,21 +98,21 @@ Updates the content of the specified file. Harper Studio uses this call to save _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `set_custom_function` -* project _(required)_ - the name of the project containing the file for which you wish to set content -* type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers -* file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js) -* function_content _(required)_ - the content you wish to save into the specified file +- operation _(required)_ - must always be `set_custom_function` +- project _(required)_ - the name of the project containing the file for which you wish to set content +- type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers +- file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js) +- function_content _(required)_ - the content you wish to save into the specified file ### Body ```json { - "operation": "set_custom_function", - "project": "dogs", - "type": "helpers", - "file": "example", - "function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization:
request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" + "operation": "set_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example", + "function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" } ``` @@ -118,7 +120,7 @@ _Operation is restricted to super_user roles only_ ```json { - "message": "Successfully updated custom function: example.js" + "message": "Successfully updated custom function: example.js" } ``` @@ -130,19 +132,19 @@ Deletes the specified file. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `drop_custom_function` -* project _(required)_ - the name of the project containing the file you wish to delete -* type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers -* file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js) +- operation _(required)_ - must always be `drop_custom_function` +- project _(required)_ - the name of the project containing the file you wish to delete +- type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers +- file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js) ### Body ```json { - "operation": "drop_custom_function", - "project": "dogs", - "type": "helpers", - "file": "example" + "operation": "drop_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example" } ``` @@ -150,7 +152,7 @@ _Operation is restricted to super_user roles only_ ```json { - "message":"Successfully deleted custom function: example.js" + "message": "Successfully deleted custom function: example.js" } ``` @@ -162,15 +164,15 @@ Creates a new project folder in the Custom Functions root project directory. 
It _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `add_custom_function_project` -* project _(required)_ - the name of the project you wish to create +- operation _(required)_ - must always be `add_custom_function_project` +- project _(required)_ - the name of the project you wish to create ### Body ```json { - "operation": "add_custom_function_project", - "project": "dogs" + "operation": "add_custom_function_project", + "project": "dogs" } ``` @@ -178,7 +180,7 @@ _Operation is restricted to super_user roles only_ ```json { - "message":"Successfully created custom function project: dogs" + "message": "Successfully created custom function project: dogs" } ``` @@ -190,15 +192,15 @@ Deletes the specified project folder and all of its contents. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `drop_custom_function_project` -* project _(required)_ - the name of the project you wish to delete +- operation _(required)_ - must always be `drop_custom_function_project` +- project _(required)_ - the name of the project you wish to delete ### Body ```json { - "operation": "drop_custom_function_project", - "project": "dogs" + "operation": "drop_custom_function_project", + "project": "dogs" } ``` @@ -206,7 +208,7 @@ _Operation is restricted to super_user roles only_ ```json { - "message": "Successfully deleted project: dogs" + "message": "Successfully deleted project: dogs" } ``` @@ -218,17 +220,17 @@ Creates a .tar file of the specified project folder, then reads it into a base64 _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `package_custom_function_project` -* project _(required)_ - the name of the project you wish to package up for deployment -* skip_node_modules _(optional)_ - if true, creates option for tar module that will exclude the project's node_modules directory. Must be a boolean. +- operation _(required)_ - must always be `package_custom_function_project` +- project _(required)_ - the name of the project you wish to package up for deployment +- skip_node_modules _(optional)_ - if true, sets an option for the tar module that will exclude the project's node_modules directory. Must be a boolean. ### Body ```json { - "operation": "package_custom_function_project", - "project": "dogs", - "skip_node_modules": true + "operation": "package_custom_function_project", + "project": "dogs", + "skip_node_modules": true } ``` @@ -236,9 +238,9 @@ _Operation is restricted to super_user roles only_ ```json { - "project": "dogs", - "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", - "file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar" + "project": "dogs", + "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + "file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar" } ``` @@ -250,18 +252,17 @@ Takes the output of package_custom_function_project, decrypts the base64-encoded _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `deploy_custom_function_project` -* project _(required)_ - the name of the project you wish to deploy. Must be a string -* payload _(required)_ - a base64-encoded string representation of the .tar file. Must be a string - +- operation _(required)_ - must always be `deploy_custom_function_project` +- project _(required)_ - the name of the project you wish to deploy. Must be a string +- payload _(required)_ - a base64-encoded string representation of the .tar file.
Must be a string ### Body ```json { - "operation": "deploy_custom_function_project", - "project": "dogs", - "payload": "A very large base64-encoded string represenation of the .tar file" + "operation": "deploy_custom_function_project", + "project": "dogs", + "payload": "A very large base64-encoded string representation of the .tar file" } ``` @@ -269,6 +270,6 @@ _Operation is restricted to super_user roles only_ ```json { - "message": "Successfully deployed project: dogs" + "message": "Successfully deployed project: dogs" } ``` diff --git a/docs/developers/operations-api/databases-and-tables.md b/docs/developers/operations-api/databases-and-tables.md index ebe3eb2f..41994a9c 100644 --- a/docs/developers/operations-api/databases-and-tables.md +++ b/docs/developers/operations-api/databases-and-tables.md @@ -1,360 +1,384 @@ -# Databases and Tables +# Databases and Tables ## Describe All + Returns the definitions of all databases and tables within the database. Record counts above 5000 records are estimated, as determining the exact count can be expensive. When the record count is estimated, this is indicated by the inclusion of a confidence interval of `estimated_record_range`. If you need the exact count, you can include an `"exact_count": true` in the operation, but be aware that this requires a full table scan (which may be expensive). -* operation _(required)_ - must always be `describe_all` +- operation _(required)_ - must always be `describe_all` ### Body + ```json { - "operation": "describe_all" + "operation": "describe_all" } ``` ### Response: 200 + ```json { - "dev": { - "dog": { - "schema": "dev", - "name": "dog", - "hash_attribute": "id", - "audit": true, - "schema_defined": false, - "attributes": [ - { - "attribute": "id", - "indexed": true, - "is_primary_key": true - }, - { - "attribute": "__createdtime__", - "indexed": true - }, - { - "attribute": "__updatedtime__", - "indexed": true - }, - { - "attribute": "type", - "indexed": true - } - ], - "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", - "record_count": 4000, - "estimated_record_range": [3976, 4033], - "last_updated_record": 1697658683698.4504 - } - } + "dev": { + "dog": { + "schema": "dev", + "name": "dog", + "hash_attribute": "id", + "audit": true, + "schema_defined": false, + "attributes": [ + { + "attribute": "id", + "indexed": true, + "is_primary_key": true + }, + { + "attribute": "__createdtime__", + "indexed": true + }, + { + "attribute": "__updatedtime__", + "indexed": true + }, + { + "attribute": "type", + "indexed": true + } + ], + "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", + "record_count": 4000, + "estimated_record_range": [3976, 4033], + "last_updated_record": 1697658683698.4504 + } + } } ``` --- ## Describe database + Returns the definitions of all tables within the specified database. -* operation _(required)_ - must always be `describe_database` -* database _(optional)_ - database where the table you wish to describe lives. The default is `data` +- operation _(required)_ - must always be `describe_database` +- database _(optional)_ - database where the table you wish to describe lives.
The default is `data` ### Body + ```json { - "operation": "describe_database", - "database": "dev" + "operation": "describe_database", + "database": "dev" } ``` ### Response: 200 + ```json { - "dog": { - "schema": "dev", - "name": "dog", - "hash_attribute": "id", - "audit": true, - "schema_defined": false, - "attributes": [ - { - "attribute": "id", - "indexed": true, - "is_primary_key": true - }, - { - "attribute": "__createdtime__", - "indexed": true - }, - { - "attribute": "__updatedtime__", - "indexed": true - }, - { - "attribute": "type", - "indexed": true - } - ], - "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", - "record_count": 4000, - "estimated_record_range": [3976, 4033], - "last_updated_record": 1697658683698.4504 - } + "dog": { + "schema": "dev", + "name": "dog", + "hash_attribute": "id", + "audit": true, + "schema_defined": false, + "attributes": [ + { + "attribute": "id", + "indexed": true, + "is_primary_key": true + }, + { + "attribute": "__createdtime__", + "indexed": true + }, + { + "attribute": "__updatedtime__", + "indexed": true + }, + { + "attribute": "type", + "indexed": true + } + ], + "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", + "record_count": 4000, + "estimated_record_range": [3976, 4033], + "last_updated_record": 1697658683698.4504 + } } ``` --- ## Describe Table + Returns the definition of the specified table. -* operation _(required)_ - must always be `describe_table` -* table _(required)_ - table you wish to describe -* database _(optional)_ - database where the table you wish to describe lives. The default is `data` +- operation _(required)_ - must always be `describe_table` +- table _(required)_ - table you wish to describe +- database _(optional)_ - database where the table you wish to describe lives. The default is `data` ### Body + ```json { - "operation": "describe_table", - "table": "dog" + "operation": "describe_table", + "table": "dog" } ``` ### Response: 200 + ```json { - "schema": "dev", - "name": "dog", - "hash_attribute": "id", - "audit": true, - "schema_defined": false, - "attributes": [ - { - "attribute": "id", - "indexed": true, - "is_primary_key": true - }, - { - "attribute": "__createdtime__", - "indexed": true - }, - { - "attribute": "__updatedtime__", - "indexed": true - }, - { - "attribute": "type", - "indexed": true - } - ], - "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", - "record_count": 4000, - "estimated_record_range": [3976, 4033], - "last_updated_record": 1697658683698.4504 + "schema": "dev", + "name": "dog", + "hash_attribute": "id", + "audit": true, + "schema_defined": false, + "attributes": [ + { + "attribute": "id", + "indexed": true, + "is_primary_key": true + }, + { + "attribute": "__createdtime__", + "indexed": true + }, + { + "attribute": "__updatedtime__", + "indexed": true + }, + { + "attribute": "type", + "indexed": true + } + ], + "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", + "record_count": 4000, + "estimated_record_range": [3976, 4033], + "last_updated_record": 1697658683698.4504 } ``` --- ## Create database + Create a new database. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `create_database` -* database _(optional)_ - name of the database you are creating. The default is `data` +- operation _(required)_ - must always be `create_database` +- database _(optional)_ - name of the database you are creating. 
The default is `data` ### Body + ```json { - "operation": "create_database", - "database": "dev" + "operation": "create_database", + "database": "dev" } ``` ### Response: 200 + ```json { - "message": "database 'dev' successfully created" + "message": "database 'dev' successfully created" } ``` --- ## Drop database + Drop an existing database. NOTE: Dropping a database will delete all tables and all of their records in that database. _Operation is restricted to super_user roles only_ -* operation _(required)_ - this should always be `drop_database` -* database _(required)_ - name of the database you are dropping -* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. +- operation _(required)_ - this should always be `drop_database` +- database _(required)_ - name of the database you are dropping +- replicated _(optional)_ - if true, Harper will replicate the operation to all nodes in the cluster. Must be a boolean. ### Body + ```json { - "operation": "drop_database", - "database": "dev" + "operation": "drop_database", + "database": "dev" } ``` ### Response: 200 + ```json { - "message": "successfully deleted 'dev'" + "message": "successfully deleted 'dev'" } ``` --- ## Create Table + Create a new table within a database. _Operation is restricted to super_user roles only_ - -* operation _(required)_ - must always be `create_table` -* database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided it will default to `data`. -* table _(required)_ - name of the table you are creating -* primary_key _(required)_ - primary key for the table -* attributes _(optional)_ - an array of attributes that specifies the schema for the table, that is the set of attributes for the table. When attributes are supplied the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted. Each attribute is specified as: - * name _(required)_ - the name of the attribute - * indexed _(optional)_ - indicates if the attribute should be indexed - * type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any) -* expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted (records are not evicted on any timer if not specified). This is specified in seconds. +- operation _(required)_ - must always be `create_table` +- database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided it will default to `data`. +- table _(required)_ - name of the table you are creating +- primary_key _(required)_ - primary key for the table +- attributes _(optional)_ - an array of attributes that specifies the schema for the table (the set of attributes for the table). When attributes are supplied the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted.
Each attribute is specified as: + - name _(required)_ - the name of the attribute + - indexed _(optional)_ - indicates if the attribute should be indexed + - type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any) +- expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted (records are not evicted on any timer if not specified). This is specified in seconds. ### Body + ```json { - "operation": "create_table", - "database": "dev", - "table": "dog", - "primary_key": "id" + "operation": "create_table", + "database": "dev", + "table": "dog", + "primary_key": "id" } ``` ### Response: 200 + ```json { - "message": "table 'dev.dog' successfully created." + "message": "table 'dev.dog' successfully created." } ``` --- ## Drop Table + Drop an existing database table. NOTE: Dropping a table will delete all associated records in that table. _Operation is restricted to super_user roles only_ -* operation _(required)_ - this should always be `drop_table` -* database _(optional)_ - database where the table you are dropping lives. The default is `data` -* table _(required)_ - name of the table you are dropping -* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. +- operation _(required)_ - this should always be `drop_table` +- database _(optional)_ - database where the table you are dropping lives. The default is `data` +- table _(required)_ - name of the table you are dropping +- replicated _(optional)_ - if true, Harper will replicate the operation to all nodes in the cluster. Must be a boolean. ### Body ```json { - "operation": "drop_table", - "database": "dev", - "table": "dog" + "operation": "drop_table", + "database": "dev", + "table": "dog" } ``` ### Response: 200 + ```json { - "message": "successfully deleted table 'dev.dog'" + "message": "successfully deleted table 'dev.dog'" } ``` --- -## Create Attribute +## Create Attribute + Create a new attribute within the specified table. **The create_attribute operation can be used by admins wishing to pre-define database values for setting role-based permissions or for any other reason.** _Note: Harper will automatically create new attributes on insert and update if they do not already exist within the database._ -* operation _(required)_ - must always be `create_attribute` -* database _(optional)_ - name of the database of the table you want to add your attribute. The default is `data` -* table _(required)_ - name of the table where you want to add your attribute to live -* attribute _(required)_ - name for the attribute +- operation _(required)_ - must always be `create_attribute` +- database _(optional)_ - name of the database containing the table to which you want to add your attribute.
The default is `data` +- table _(required)_ - name of the table to which you want to add your attribute +- attribute _(required)_ - name for the attribute ### Body + ```json { - "operation": "create_attribute", - "database": "dev", - "table": "dog", - "attribute": "is_adorable" + "operation": "create_attribute", + "database": "dev", + "table": "dog", + "attribute": "is_adorable" } ``` ### Response: 200 + ```json { - "message": "inserted 1 of 1 records", - "skipped_hashes": [], - "inserted_hashes": [ - "383c0bef-5781-4e1c-b5c8-987459ad0831" - ] + "message": "inserted 1 of 1 records", + "skipped_hashes": [], + "inserted_hashes": ["383c0bef-5781-4e1c-b5c8-987459ad0831"] } ``` --- ## Drop Attribute + Drop an existing attribute from the specified table. NOTE: Dropping an attribute will delete all associated attribute values in that table. _Operation is restricted to super_user roles only_ -* operation _(required)_ - this should always be `drop_attribute` -* database _(optional)_ - database where the table you are dropping lives. The default is `data` -* table _(required)_ - table where the attribute you are dropping lives -* attribute _(required)_ - attribute that you intend to drop +- operation _(required)_ - this should always be `drop_attribute` +- database _(optional)_ - database where the attribute you are dropping lives. The default is `data` +- table _(required)_ - table where the attribute you are dropping lives +- attribute _(required)_ - attribute that you intend to drop ### Body ```json { - "operation": "drop_attribute", - "database": "dev", - "table": "dog", - "attribute": "is_adorable" + "operation": "drop_attribute", + "database": "dev", + "table": "dog", + "attribute": "is_adorable" } ``` ### Response: 200 + ```json { - "message": "successfully deleted attribute 'is_adorable'" + "message": "successfully deleted attribute 'is_adorable'" } ``` --- ## Get Backup + This will return a snapshot of the requested database. This provides a means for backing up the database through the operations API. The response will be the raw database file (in binary format), which can later be restored as a database file by copying into the appropriate hdb/databases directory (with Harper not running). The returned file is a snapshot of the database at the moment in time that the get_backup operation begins. This also supports backing up individual tables in a database. However, this is a more expensive operation than backing up a database as a whole, and will lose any transactional atomicity between writes across tables, so generally it is recommended that you back up the entire database. It is important to note that trying to copy a database file that is in use (Harper actively running and writing to the file) using standard file copying tools is not safe (the copied file will likely be corrupt), which is why using this snapshot operation is recommended for backups (volume snapshots are also a good way to back up Harper databases).
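As an illustration, the sketch below pulls a snapshot over HTTP and writes it to disk. It is a minimal sketch, not the definitive client: it assumes Node 18+ (for the global `fetch`), the operations API on its default port 9925, and placeholder `HDB_ADMIN` credentials; the output filename is arbitrary, and restoring means copying the file into the hdb/databases directory while Harper is stopped, as described above.

```typescript
// Minimal sketch: request a database snapshot via get_backup and save it.
// Assumptions: Node 18+ (global fetch), operations API on default port 9925,
// and placeholder HDB_ADMIN credentials.
import { writeFile } from 'node:fs/promises';

const OPS_API = 'http://localhost:9925';
const AUTH = 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64');

async function backupDatabase(database: string): Promise<void> {
	const res = await fetch(OPS_API, {
		method: 'POST',
		headers: { 'Content-Type': 'application/json', Authorization: AUTH },
		body: JSON.stringify({ operation: 'get_backup', database }),
	});
	if (!res.ok) throw new Error(`get_backup failed with status ${res.status}`);
	// The response body is the raw database file, so treat it as binary.
	await writeFile(`${database}.backup`, Buffer.from(await res.arrayBuffer()));
}

backupDatabase('dev').catch(console.error);
```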
_Operation is restricted to super_user roles only_ -* operation _(required)_ - this should always be `get_backup` -* database _(required)_ - this is the database that will be snapshotted and returned -* table _(optional)_ - this will specify a specific table to backup -* tables _(optional)_ - this will specify a specific set of tables to backup +- operation _(required)_ - this should always be `get_backup` +- database _(required)_ - this is the database that will be snapshotted and returned +- table _(optional)_ - this will specify a specific table to back up +- tables _(optional)_ - this will specify a specific set of tables to back up ### Body ```json { - "operation": "get_backup", - "database": "dev" + "operation": "get_backup", + "database": "dev" } ``` ### Response: 200 + ``` The database in raw binary data format ``` diff --git a/docs/developers/operations-api/jobs.md b/docs/developers/operations-api/jobs.md index 8a5fea46..0240ae4a 100644 --- a/docs/developers/operations-api/jobs.md +++ b/docs/developers/operations-api/jobs.md @@ -1,78 +1,83 @@ -# Jobs +# Jobs ## Get Job + Returns job status, metrics, and messages for the specified job ID. -* operation _(required)_ - must always be `get_job` -* id _(required)_ - the id of the job you wish to view +- operation _(required)_ - must always be `get_job` +- id _(required)_ - the id of the job you wish to view ### Body ```json { - "operation": "get_job", - "id": "4a982782-929a-4507-8794-26dae1132def" + "operation": "get_job", + "id": "4a982782-929a-4507-8794-26dae1132def" } ``` ### Response: 200 + ```json [ - { - "__createdtime__": 1611615798782, - "__updatedtime__": 1611615801207, - "created_datetime": 1611615798774, - "end_datetime": 1611615801206, - "id": "4a982782-929a-4507-8794-26dae1132def", - "job_body": null, - "message": "successfully loaded 350 of 350 records", - "start_datetime": 1611615798805, - "status": "COMPLETE", - "type": "csv_url_load", - "user": "HDB_ADMIN", - "start_datetime_converted": "2021-01-25T23:03:18.805Z", - "end_datetime_converted": "2021-01-25T23:03:21.206Z" - } + { + "__createdtime__": 1611615798782, + "__updatedtime__": 1611615801207, + "created_datetime": 1611615798774, + "end_datetime": 1611615801206, + "id": "4a982782-929a-4507-8794-26dae1132def", + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "start_datetime": 1611615798805, + "status": "COMPLETE", + "type": "csv_url_load", + "user": "HDB_ADMIN", + "start_datetime_converted": "2021-01-25T23:03:18.805Z", + "end_datetime_converted": "2021-01-25T23:03:21.206Z" + } ] ``` --- ## Search Jobs By Start Date + Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
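As an illustration, the sketch below retrieves all jobs started in the last hour. The endpoint, credentials, and one-hour window are assumptions for the example; the date strings follow the ISO 8601 shape shown in the request body below.

```typescript
// Minimal sketch: list all jobs started within the last hour.
// Assumptions: Node 18+ (global fetch), operations API on default port 9925,
// and placeholder HDB_ADMIN credentials.
const OPS_API = 'http://localhost:9925';
const AUTH = 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64');

async function jobsInLastHour(): Promise<unknown[]> {
	const to = new Date();
	const from = new Date(to.getTime() - 60 * 60 * 1000); // one hour ago
	const res = await fetch(OPS_API, {
		method: 'POST',
		headers: { 'Content-Type': 'application/json', Authorization: AUTH },
		body: JSON.stringify({
			operation: 'search_jobs_by_start_date',
			from_date: from.toISOString(),
			to_date: to.toISOString(),
		}),
	});
	return (await res.json()) as unknown[];
}

jobsInLastHour().then((jobs) => console.log(`found ${jobs.length} job(s)`));
```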
_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `search_jobs_by_start_date` -* from_date _(required)_ - the date you wish to start the search -* to_date _(required)_ - the date you wish to end the search +- operation _(required)_ - must always be `search_jobs_by_start_date` +- from_date _(required)_ - the date you wish to start the search +- to_date _(required)_ - the date you wish to end the search ### Body + ```json { - "operation": "search_jobs_by_start_date", - "from_date": "2021-01-25T22:05:27.464+0000", - "to_date": "2021-01-25T23:05:27.464+0000" + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" } ``` ### Response: 200 + ```json [ - { - "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", - "user": "HDB_ADMIN", - "type": "csv_url_load", - "status": "COMPLETE", - "start_datetime": 1611613284781, - "end_datetime": 1611613287204, - "job_body": null, - "message": "successfully loaded 350 of 350 records", - "created_datetime": 1611613284764, - "__createdtime__": 1611613284767, - "__updatedtime__": 1611613287207, - "start_datetime_converted": "2021-01-25T22:21:24.781Z", - "end_datetime_converted": "2021-01-25T22:21:27.204Z" - } + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } ] -``` \ No newline at end of file +``` diff --git a/docs/developers/operations-api/logs.md b/docs/developers/operations-api/logs.md index eeb8ac43..2f7e9630 100644 --- a/docs/developers/operations-api/logs.md +++ b/docs/developers/operations-api/logs.md @@ -4,27 +4,27 @@ Returns log outputs from the primary Harper log based on the provided search criteria. [Read more about Harper logging here](../../administration/logging/logging.md#read-logs-via-the-api). -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `read_Log` -* start _(optional)_ - result to start with. Default is 0, the first log in `hdb.log`. Must be a number -* limit _(optional)_ - number of results returned. Default behavior is 1000. Must be a number -* level _(optional)_ - error level to filter on. Default behavior is all levels. Must be `notify`, `error`, `warn`, `info`, `debug` or `trace` -* from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is first log in `hdb.log` -* until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is last log in `hdb.log` -* order _(optional)_ - order to display logs desc or asc by timestamp. By default, will maintain `hdb.log` order +- operation _(required)_ - must always be `read_log` +- start _(optional)_ - result to start with. Default is 0, the first log in `hdb.log`. Must be a number +- limit _(optional)_ - number of results returned. Default behavior is 1000. Must be a number +- level _(optional)_ - error level to filter on. Default behavior is all levels.
Must be `notify`, `error`, `warn`, `info`, `debug` or `trace` +- from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is first log in `hdb.log` +- until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is last log in `hdb.log` +- order _(optional)_ - order to display logs desc or asc by timestamp. By default, will maintain `hdb.log` order ### Body ```json { - "operation": "read_log", - "start": 0, - "limit": 1000, - "level": "error", - "from": "2021-01-25T22:05:27.464+0000", - "until": "2021-01-25T23:05:27.464+0000", - "order": "desc" + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" } ``` @@ -32,56 +32,55 @@ _Operation is restricted to super\_user roles only_ ```json [ - { - "level": "notify", - "message": "Connected to cluster server.", - "timestamp": "2021-01-25T23:03:20.710Z", - "thread": "main/0", - "tags": [] - }, - { - "level": "warn", - "message": "Login failed", - "timestamp": "2021-01-25T22:24:45.113Z", - "thread": "http/9", - "tags": [] - }, - { - "level": "error", - "message": "unknown attribute 'name and breed'", - "timestamp": "2021-01-25T22:23:24.167Z", - "thread": "http/9", - "tags": [] - } + { + "level": "notify", + "message": "Connected to cluster server.", + "timestamp": "2021-01-25T23:03:20.710Z", + "thread": "main/0", + "tags": [] + }, + { + "level": "warn", + "message": "Login failed", + "timestamp": "2021-01-25T22:24:45.113Z", + "thread": "http/9", + "tags": [] + }, + { + "level": "error", + "message": "unknown attribute 'name and breed'", + "timestamp": "2021-01-25T22:23:24.167Z", + "thread": "http/9", + "tags": [] + } ] - ``` -*** +--- ## Read Transaction Log Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. [Read more about Harper transaction logs here](logs.md#read-transaction-log). -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `read_transaction_log` -* schema _(required)_ - schema under which the transaction log resides -* table _(required)_ - table under which the transaction log resides -* from _(optional)_ - time format must be millisecond-based epoch in UTC -* to _(optional)_ - time format must be millisecond-based epoch in UTC -* limit _(optional)_ - max number of logs you want to receive. Must be a number +- operation _(required)_ - must always be `read_transaction_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- from _(optional)_ - time format must be millisecond-based epoch in UTC +- to _(optional)_ - time format must be millisecond-based epoch in UTC +- limit _(optional)_ - max number of logs you want to receive. 
Must be a number ### Body ```json { - "operation": "read_transaction_log", - "schema": "dev", - "table": "dog", - "from": 1560249020865, - "to": 1660585656639, - "limit": 10 + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1560249020865, + "to": 1660585656639, + "limit": 10 } ``` @@ -89,198 +88,198 @@ _Operation is restricted to super\_user roles only_ ```json [ - { - "operation": "insert", - "user": "admin", - "timestamp": 1660165619736, - "records": [ - { - "id": 1, - "dog_name": "Penny", - "owner_name": "Kyle", - "breed_id": 154, - "age": 7, - "weight_lbs": 38, - "__updatedtime__": 1660165619688, - "__createdtime__": 1660165619688 - } - ] - }, - { - "operation": "insert", - "user": "admin", - "timestamp": 1660165619813, - "records": [ - { - "id": 2, - "dog_name": "Harper", - "owner_name": "Stephen", - "breed_id": 346, - "age": 7, - "weight_lbs": 55, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 3, - "dog_name": "Alby", - "owner_name": "Kaylan", - "breed_id": 348, - "age": 7, - "weight_lbs": 84, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 4, - "dog_name": "Billy", - "owner_name": "Zach", - "breed_id": 347, - "age": 6, - "weight_lbs": 60, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 5, - "dog_name": "Rose Merry", - "owner_name": "Zach", - "breed_id": 348, - "age": 8, - "weight_lbs": 15, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 6, - "dog_name": "Kato", - "owner_name": "Kyle", - "breed_id": 351, - "age": 6, - "weight_lbs": 32, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 7, - "dog_name": "Simon", - "owner_name": "Fred", - "breed_id": 349, - "age": 3, - "weight_lbs": 35, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 8, - "dog_name": "Gemma", - "owner_name": "Stephen", - "breed_id": 350, - "age": 5, - "weight_lbs": 55, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 9, - "dog_name": "Yeti", - "owner_name": "Jaxon", - "breed_id": 200, - "age": 5, - "weight_lbs": 55, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 10, - "dog_name": "Monkey", - "owner_name": "Aron", - "breed_id": 271, - "age": 7, - "weight_lbs": 35, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 11, - "dog_name": "Bode", - "owner_name": "Margo", - "breed_id": 104, - "age": 8, - "weight_lbs": 75, - "adorable": true, - "__updatedtime__": 1660165619797, - "__createdtime__": 1660165619797 - }, - { - "id": 12, - "dog_name": "Tucker", - "owner_name": "David", - "breed_id": 346, - "age": 2, - "weight_lbs": 60, - "adorable": true, - "__updatedtime__": 1660165619798, - "__createdtime__": 1660165619798 - }, - { - "id": 13, - "dog_name": "Jagger", - "owner_name": "Margo", - "breed_id": 271, - "age": 7, - "weight_lbs": 35, - "adorable": true, - "__updatedtime__": 1660165619798, - "__createdtime__": 1660165619798 - } - ] - }, - { - "operation": "update", - "user": "admin", - "timestamp": 1660165620040, - "records": [ - { - "id": 1, - "dog_name": "Penny B", - "__updatedtime__": 1660165620036 - } - ] - } + { + "operation": "insert", + "user": "admin", + 
"timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619813, + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } ] ``` -*** +--- ## Delete Transaction Logs Before Deletes transaction log data for the specified database table that is older than the specified timestamp. -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `delete_transaction_log_before` -* schema _(required)_ - schema under which the transaction log resides. Must be a string -* table _(required)_ - table under which the transaction log resides. 
Must be a string -* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC +- operation _(required)_ - must always be `delete_transaction_logs_before` +- schema _(required)_ - schema under which the transaction log resides. Must be a string +- table _(required)_ - table under which the transaction log resides. Must be a string +- timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC ### Body ```json { - "operation": "delete_transaction_logs_before", - "schema": "dev", - "table": "dog", - "timestamp": 1598290282817 + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 } ``` @@ -288,31 +287,31 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126" + "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126" } ``` -*** +--- ## Read Audit Log -AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search\_type and search\_values fields. [Read more about Harper transaction logs here.](../../administration/logging/transaction-logging.md#read_transaction_log) +AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search_type and search_values fields. [Read more about Harper transaction logs here.](../../administration/logging/transaction-logging.md#read_transaction_log) -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `read_audit_log` -* schema _(required)_ - schema under which the transaction log resides -* table _(required)_ - table under which the transaction log resides -* search\_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username` -* search\_values _(optional)_ - an array of string or numbers relating to search\_type +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username` +- search_values _(optional)_ - an array of strings or numbers relating to search_type ### Body ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog" + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" } ``` @@ -320,110 +319,99 @@ _Operation is restricted to super\_user roles only_ ```json [ - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585635882.288, - "hash_values": [ - 318 - ], - "records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - }, - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585716133.01, - "hash_values": [ - 444 - ], - "records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585716128, - "__createdtime__": 1660585716128 - } - ] - }, - { - "operation": "update", - "user_name": "admin", - "timestamp": 1660585740558.415,
"hash_values": [ - 444 - ], - "records": [ - { - "id": 444, - "fur_type": "coarse", - "__updatedtime__": 1660585740556 - } - ], - "original_records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585716128, - "__createdtime__": 1660585716128 - } - ] - }, - { - "operation": "delete", - "user_name": "admin", - "timestamp": 1660585759710.56, - "hash_values": [ - 444 - ], - "original_records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585740556, - "__createdtime__": 1660585716128, - "fur_type": "coarse" - } - ] - } + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [318], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [444], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [444], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [444], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + } ] ``` -*** +--- ## Read Audit Log by timestamp AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table between the specified time window. [Read more about Harper transaction logs here](logs.md#read-transaction-log). -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `read_audit_log` -* schema _(required)_ - schema under which the transaction log resides -* table _(required)_ - table under which the transaction log resides -* search\_type _(optional)_ - timestamp -* search\_values _(optional)_ - an array containing a maximum of two values \[`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. - * Timestamp format is millisecond-based epoch in UTC - * If no items are supplied then all transactions are returned - * If only one entry is supplied then all transactions after the supplied timestamp will be returned +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - timestamp +- search_values _(optional)_ - an array containing a maximum of two values \[`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. 
+ - Timestamp format is millisecond-based epoch in UTC + - If no items are supplied then all transactions are returned + - If only one entry is supplied then all transactions after the supplied timestamp will be returned ### Body ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog", - "search_type": "timestamp", - "search_values": [ - 1660585740558, - 1660585759710.56 - ] + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [1660585740558, 1660585759710.56] } ``` @@ -431,129 +419,117 @@ _Operation is restricted to super\_user roles only_ ```json [ - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585635882.288, - "hash_values": [ - 318 - ], - "records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - }, - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585716133.01, - "hash_values": [ - 444 - ], - "records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585716128, - "__createdtime__": 1660585716128 - } - ] - }, - { - "operation": "update", - "user_name": "admin", - "timestamp": 1660585740558.415, - "hash_values": [ - 444 - ], - "records": [ - { - "id": 444, - "fur_type": "coarse", - "__updatedtime__": 1660585740556 - } - ], - "original_records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585716128, - "__createdtime__": 1660585716128 - } - ] - }, - { - "operation": "delete", - "user_name": "admin", - "timestamp": 1660585759710.56, - "hash_values": [ - 444 - ], - "original_records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585740556, - "__createdtime__": 1660585716128, - "fur_type": "coarse" - } - ] - }, - { - "operation": "update", - "user_name": "admin", - "timestamp": 1660586298457.224, - "hash_values": [ - 318 - ], - "records": [ - { - "id": 318, - "fur_type": "super fluffy", - "__updatedtime__": 1660586298455 - } - ], - "original_records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - } + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [318], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [444], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [444], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [444], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [318], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + 
"original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } ] ``` -*** +--- ## Read Audit Log by username AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging.md#read_transaction_log). -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `read_audit_log` -* schema _(required)_ - schema under which the transaction log resides -* table _(required)_ - table under which the transaction log resides -* search\_type _(optional)_ - username -* search\_values _(optional)_ - the Harper user for whom you would like to view transactions +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - username +- search_values _(optional)_ - the Harper user for whom you would like to view transactions ### Body ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog", - "search_type": "username", - "search_values": [ - "admin" - ] + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": ["admin"] } ``` @@ -561,131 +537,119 @@ _Operation is restricted to super\_user roles only_ ```json { - "admin": [ - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585635882.288, - "hash_values": [ - 318 - ], - "records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - }, - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585716133.01, - "hash_values": [ - 444 - ], - "records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585716128, - "__createdtime__": 1660585716128 - } - ] - }, - { - "operation": "update", - "user_name": "admin", - "timestamp": 1660585740558.415, - "hash_values": [ - 444 - ], - "records": [ - { - "id": 444, - "fur_type": "coarse", - "__updatedtime__": 1660585740556 - } - ], - "original_records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585716128, - "__createdtime__": 1660585716128 - } - ] - }, - { - "operation": "delete", - "user_name": "admin", - "timestamp": 1660585759710.56, - "hash_values": [ - 444 - ], - "original_records": [ - { - "id": 444, - "dog_name": "Davis", - "__updatedtime__": 1660585740556, - "__createdtime__": 1660585716128, - "fur_type": "coarse" - } - ] - }, - { - "operation": "update", - "user_name": "admin", - "timestamp": 1660586298457.224, - "hash_values": [ - 318 - ], - "records": [ - { - "id": 318, - "fur_type": "super fluffy", - "__updatedtime__": 1660586298455 - } - ], - "original_records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - } - ] + "admin": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [318], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 
1660585716133.01, + "hash_values": [444], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [444], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [444], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [318], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] } ``` -*** +--- -## Read Audit Log by hash\_value +## Read Audit Log by hash_value AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). [Read more about Harper transaction logs here](../../administration/logging/transaction-logging.md#read_transaction_log). -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `read_audit_log` -* schema _(required)_ - schema under which the transaction log resides -* table _(required)_ - table under which the transaction log resides -* search\_type _(optional)_ - hash\_value -* search\_values _(optional)_ - an array of hash\_attributes for which you wish to see transaction logs +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - hash_value +- search_values _(optional)_ - an array of hash_attributes for which you wish to see transaction logs ### Body ```json { - "operation": "read_audit_log", - "schema": "dev", - "table": "dog", - "search_type": "hash_value", - "search_values": [ - 318 - ] + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [318] } ``` @@ -693,65 +657,65 @@ _Operation is restricted to super\_user roles only_ ```json { - "318": [ - { - "operation": "insert", - "user_name": "admin", - "timestamp": 1660585635882.288, - "records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - }, - { - "operation": "update", - "user_name": "admin", - "timestamp": 1660586298457.224, - "records": [ - { - "id": 318, - "fur_type": "super fluffy", - "__updatedtime__": 1660586298455 - } - ], - "original_records": [ - { - "id": 318, - "dog_name": "Polliwog", - "__updatedtime__": 1660585635876, - "__createdtime__": 1660585635876 - } - ] - } - ] + "318": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + 
"__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] } ``` -*** +--- ## Delete Audit Logs Before AuditLog must be enabled in the Harper configuration file to make this request. Deletes audit log data for the specified database table that is older than the specified timestamp. -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `delete_audit_logs_before` -* schema _(required)_ - schema under which the transaction log resides. Must be a string -* table _(required)_ - table under which the transaction log resides. Must be a string -* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC +- operation _(required)_ - must always be `delete_audit_logs_before` +- schema _(required)_ - schema under which the transaction log resides. Must be a string +- table _(required)_ - table under which the transaction log resides. Must be a string +- timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC ### Body ```json { - "operation": "delete_audit_logs_before", - "schema": "dev", - "table": "dog", - "timestamp": 1660585759710.56 + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1660585759710.56 } ``` @@ -759,6 +723,6 @@ _Operation is restricted to super\_user roles only_ ```json { - "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" + "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" } ``` diff --git a/docs/developers/operations-api/nosql-operations.md b/docs/developers/operations-api/nosql-operations.md index 0c541d0a..0b8b7d7f 100644 --- a/docs/developers/operations-api/nosql-operations.md +++ b/docs/developers/operations-api/nosql-operations.md @@ -4,32 +4,32 @@ Adds one or more rows of data to a database table. Primary keys of the inserted JSON record may be supplied on insert. If a primary key is not provided, then a GUID or incremented number (depending on type) will be generated for each record. -* operation _(required)_ - must always be `insert` -* database _(optional)_ - database where the table you are inserting records into lives. The default is `data` -* table _(required)_ - table where you want to insert records -* records _(required)_ - array of one or more records for insert +- operation _(required)_ - must always be `insert` +- database _(optional)_ - database where the table you are inserting records into lives. 
The default is `data`
+- table _(required)_ - table where you want to insert records
+- records _(required)_ - array of one or more records for insert

### Body

```json
{
-    "operation": "insert",
-    "database": "dev",
-    "table": "dog",
-    "records": [
-        {
-            "id": 8,
-            "dog_name": "Harper",
-            "breed_id": 346,
-            "age": 7
-        },
-        {
-            "id": 9,
-            "dog_name": "Penny",
-            "breed_id": 154,
-            "age": 7
-        }
-    ]
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 8,
+			"dog_name": "Harper",
+			"breed_id": 346,
+			"age": 7
+		},
+		{
+			"id": 9,
+			"dog_name": "Penny",
+			"breed_id": 154,
+			"age": 7
+		}
+	]
}
```

@@ -37,12 +37,9 @@ Adds one or more rows of data to a database table. Primary keys of the inserted

```json
{
-    "message": "inserted 2 of 2 records",
-    "inserted_hashes": [
-        8,
-        9
-    ],
-    "skipped_hashes": []
+	"message": "inserted 2 of 2 records",
+	"inserted_hashes": [8, 9],
+	"skipped_hashes": []
}
```
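+For context, here is a minimal sketch of how an operations body such as the insert above can be posted from Node. The endpoint URL and credentials are illustrative assumptions (a local instance on the default operations port, with placeholder Basic-auth credentials), not values to rely on:
+
+```typescript
+// Hedged sketch: POST an Operations API body to a local Harper instance.
+// 'http://localhost:9925' and the credentials are placeholders for this example.
+const res = await fetch('http://localhost:9925', {
+	method: 'POST',
+	headers: {
+		'Content-Type': 'application/json',
+		// Basic auth is the base64 of "username:password"
+		Authorization: `Basic ${Buffer.from('HDB_ADMIN:password').toString('base64')}`,
+	},
+	body: JSON.stringify({
+		operation: 'insert',
+		database: 'dev',
+		table: 'dog',
+		records: [{ id: 8, dog_name: 'Harper', breed_id: 346, age: 7 }],
+	}),
+});
+console.log(await res.json()); // e.g. { "message": "inserted 1 of 1 records", ... }
+```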
@@ -52,29 +49,29 @@ Adds one or more rows of data to a database table. Primary keys of the inserted

## Update

Changes the values of specified attributes in one or more rows in a database table as identified by the primary key. NOTE: Primary key of the updated JSON record(s) MUST be supplied on update.

-* operation _(required)_ - must always be `update`
-* database _(optional)_ - database of the table you are updating records in. The default is `data`
-* table _(required)_ - table where you want to update records
-* records _(required)_ - array of one or more records for update
+- operation _(required)_ - must always be `update`
+- database _(optional)_ - database of the table you are updating records in. The default is `data`
+- table _(required)_ - table where you want to update records
+- records _(required)_ - array of one or more records for update

### Body

```json
{
-    "operation": "update",
-    "database": "dev",
-    "table": "dog",
-    "records": [
-        {
-            "id": 1,
-            "weight_lbs": 55
-        },
-        {
-            "id": 2,
-            "owner": "Kyle B",
-            "weight_lbs": 35
-        }
-    ]
+	"operation": "update",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 1,
+			"weight_lbs": 55
+		},
+		{
+			"id": 2,
+			"owner": "Kyle B",
+			"weight_lbs": 35
+		}
+	]
}
```

@@ -82,12 +79,9 @@ Changes the values of specified attributes in one or more rows in a database tab

```json
{
-    "message": "updated 2 of 2 records",
-    "update_hashes": [
-        1,
-        3
-    ],
-    "skipped_hashes": []
+	"message": "updated 2 of 2 records",
+	"update_hashes": [1, 2],
+	"skipped_hashes": []
}
```

@@ -97,37 +91,37 @@ Changes the values of specified attributes in one or more rows in a database tab

## Upsert

Changes the values of specified attributes for rows with matching primary keys that exist in the table. Adds rows to the database table for primary keys that do not exist or are not provided.

-* operation _(required)_ - must always be `upsert`
-* database _(optional)_ - database of the table you are updating records in. The default is `data`
-* table _(required)_ - table where you want to update records
-* records _(required)_ - array of one or more records for update
+- operation _(required)_ - must always be `upsert`
+- database _(optional)_ - database of the table you are upserting records in. The default is `data`
+- table _(required)_ - table where you want to upsert records
+- records _(required)_ - array of one or more records for upsert

### Body

```json
{
-    "operation": "upsert",
-    "database": "dev",
-    "table": "dog",
-    "records": [
-        {
-            "id": 8,
-            "weight_lbs": 155
-        },
-        {
-            "name": "Bill",
-            "breed": "Pit Bull",
-            "id": 10,
-            "Age": 11,
-            "weight_lbs": 155
-        },
-        {
-            "name": "Harper",
-            "breed": "Mutt",
-            "age": 5,
-            "weight_lbs": 155
-        }
-    ]
+	"operation": "upsert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 8,
+			"weight_lbs": 155
+		},
+		{
+			"name": "Bill",
+			"breed": "Pit Bull",
+			"id": 10,
+			"Age": 11,
+			"weight_lbs": 155
+		},
+		{
+			"name": "Harper",
+			"breed": "Mutt",
+			"age": 5,
+			"weight_lbs": 155
+		}
+	]
}
```

@@ -135,12 +129,8 @@ Changes the values of specified attributes for rows with matching primary keys t

```json
{
-    "message": "upserted 3 of 3 records",
-    "upserted_hashes": [
-        8,
-        10,
-        "ea06fc8e-717b-4c6c-b69d-b29014054ab7"
-    ]
+	"message": "upserted 3 of 3 records",
+	"upserted_hashes": [8, 10, "ea06fc8e-717b-4c6c-b69d-b29014054ab7"]
}
```

@@ -150,22 +140,19 @@ Changes the values of specified attributes for rows with matching primary keys t

## Delete

Removes one or more rows of data from a specified table.

-* operation _(required)_ - must always be `delete`
-* database _(optional)_ - database where the table you are deleting records lives. The default is `data`
-* table _(required)_ - table where you want to deleting records
-* ids _(required)_ - array of one or more primary key values, which identifies records to delete
+- operation _(required)_ - must always be `delete`
+- database _(optional)_ - database where the table you are deleting records from lives. The default is `data`
+- table _(required)_ - table where you want to delete records
+- ids _(required)_ - array of one or more primary key values, which identify the records to delete

### Body

```json
{
-    "operation": "delete",
-    "database": "dev",
-    "table": "dog",
-    "ids": [
-        1,
-        2
-    ]
+	"operation": "delete",
+	"database": "dev",
+	"table": "dog",
+	"ids": [1, 2]
}
```

@@ -173,12 +160,9 @@ Removes one or more rows of data from a specified table.

```json
{
-    "message": "2 of 2 records successfully deleted",
-    "deleted_hashes": [
-        1,
-        2
-    ],
-    "skipped_hashes": []
+	"message": "2 of 2 records successfully deleted",
+	"deleted_hashes": [1, 2],
+	"skipped_hashes": []
}
```

@@ -188,27 +172,21 @@ Removes one or more rows of data from a specified table.

## Search By ID

Returns data from a table for one or more primary keys.

-* operation _(required)_ - must always be `search_by_id`
-* database _(optional)_ - database where the table you are searching lives. The default is `data`
-* table _(required)_ - table you wish to search
-* ids _(required)_ - array of primary keys to retrieve
-* get_attributes _(required)_ - define which attributes you want returned. _Use `['*']` to return all attributes_
+- operation _(required)_ - must always be `search_by_id`
+- database _(optional)_ - database where the table you are searching lives. The default is `data`
+- table _(required)_ - table you wish to search
+- ids _(required)_ - array of primary keys to retrieve
+- get_attributes _(required)_ - define which attributes you want returned. _Use `['*']` to return all attributes_

### Body

```json
{
-    "operation": "search_by_id",
-    "database": "dev",
-    "table": "dog",
-    "ids": [
-        1,
-        2
-    ],
-    "get_attributes": [
-        "dog_name",
-        "breed_id"
-    ]
+	"operation": "search_by_id",
+	"database": "dev",
+	"table": "dog",
+	"ids": [1, 2],
+	"get_attributes": ["dog_name", "breed_id"]
}
```

@@ -216,14 +194,14 @@ Returns data from a table for one or more primary keys.

```json
[
-    {
-        "dog_name": "Penny",
-        "breed_id": 154
-    },
-    {
-        "dog_name": "Harper",
-        "breed_id": 346
-    }
+	{
+		"dog_name": "Penny",
+		"breed_id": 154
+	},
+	{
+		"dog_name": "Harper",
+		"breed_id": 346
+	}
]
```

@@ -233,26 +211,23 @@ Returns data from a table for one or more primary keys.

## Search By Value

Returns data from a table for a matching value.

-* operation _(required)_ - must always be `search_by_value`
-* database _(optional)_ - database where the table you are searching lives. The default is `data`
-* table _(required)_ - table you wish to search
-* search_attribute _(required)_ - attribute you wish to search can be any attribute
-* search_value _(required)_ - value you wish to search - wild cards are allowed
-* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+- operation _(required)_ - must always be `search_by_value`
+- database _(optional)_ - database where the table you are searching lives. The default is `data`
+- table _(required)_ - table you wish to search
+- search_attribute _(required)_ - attribute you wish to search, can be any attribute
+- search_value _(required)_ - value you wish to search - wild cards are allowed
+- get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes

### Body

```json
{
-    "operation": "search_by_value",
-    "database": "dev",
-    "table": "dog",
-    "search_attribute": "owner_name",
-    "search_value": "Ky*",
-    "get_attributes": [
-        "id",
-        "dog_name"
-    ]
+	"operation": "search_by_value",
+	"database": "dev",
+	"table": "dog",
+	"search_attribute": "owner_name",
+	"search_value": "Ky*",
+	"get_attributes": ["id", "dog_name"]
}
```

@@ -260,12 +235,12 @@ Returns data from a table for a matching value.

```json
[
-    {
-        "dog_name": "Penny"
-    },
-    {
-        "dog_name": "Kato"
-    }
+	{
+		"dog_name": "Penny"
+	},
+	{
+		"dog_name": "Kato"
+	}
]
```
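+The raw `fetch` call sketched after the Insert section can be wrapped once and reused for all of the search operations, since they share the same request shape. A hedged sketch follows; the helper name, endpoint, and credentials are illustrative assumptions, not Harper-provided APIs:
+
+```typescript
+// Hypothetical reusable wrapper; the endpoint and credentials are placeholders.
+async function runOperation<T>(body: object): Promise<T> {
+	const res = await fetch('http://localhost:9925', {
+		method: 'POST',
+		headers: {
+			'Content-Type': 'application/json',
+			Authorization: `Basic ${Buffer.from('HDB_ADMIN:password').toString('base64')}`,
+		},
+		body: JSON.stringify(body),
+	});
+	if (!res.ok) throw new Error(`Operation failed with HTTP ${res.status}`);
+	return (await res.json()) as T;
+}
+
+// Usage mirroring the search_by_value example above: wildcard match on owner_name.
+const dogs = await runOperation<Array<{ id?: number; dog_name: string }>>({
+	operation: 'search_by_value',
+	database: 'dev',
+	table: 'dog',
+	search_attribute: 'owner_name',
+	search_value: 'Ky*',
+	get_attributes: ['id', 'dog_name'],
+});
+```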
@@ -275,74 +250,70 @@ Returns data from a table for a matching value.

## Search By Conditions

Returns data from a table for one or more matching conditions. This supports grouping of conditions to indicate order of operations as well.

-* operation _(required)_ - must always be `search_by_conditions`
-* database _(optional)_ - database where the table you are searching lives. The default is `data`
-* table _(required)_ - table you wish to search
-* operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
-* offset _(optional)_ - the number of records that the query results will skip. The default is `0`
-* limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit
-* sort _optional_ - This is an object that indicates the sort order. It has the following properties:
-  * attribute _(required)_ - The attribute to sort by
-  * descending _(optional)_ - If true, will sort in descending order (defaults to ascending order)
-  * next _(optional)_ - This can define the next sort object that will be used to break ties for sorting when there are multiple records with the same value for the first attribute (follows the same structure as `sort`, and can recursive additional attributes).
-* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
-* conditions _(required)_ - the array of conditions objects, specified below, to filter by. Must include one or more object in the array that are a condition or a grouped set of conditions. A condition has the following properties:
-  * search_attribute _(required)_ - the attribute you wish to search, can be any attribute
-  * search_type _(required)_ - the type of search to perform - `equals`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between`
-  * search_value _(required)_ - case-sensitive value you wish to search. If the `search_type` is `between` then use an array of two values to search between
-  Or a set of grouped conditions has the following properties:
-  * operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
-  * conditions _(required)_ - the array of conditions objects as described above.
+- operation _(required)_ - must always be `search_by_conditions`
+- database _(optional)_ - database where the table you are searching lives. The default is `data`
+- table _(required)_ - table you wish to search
+- operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+- offset _(optional)_ - the number of records that the query results will skip. The default is `0`
+- limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit
+- sort _(optional)_ - an object that indicates the sort order. It has the following properties:
+  - attribute _(required)_ - the attribute to sort by
+  - descending _(optional)_ - if true, will sort in descending order (defaults to ascending order)
+  - next _(optional)_ - defines the next sort object used to break ties when multiple records have the same value for the first attribute (follows the same structure as `sort`, and can recursively define additional attributes)
+- get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+- conditions _(required)_ - the array of condition objects, specified below, to filter by. Must include one or more objects in the array, each of which is either a condition or a grouped set of conditions; a sketch of how they combine follows this list. A condition has the following properties:
+  - search_attribute _(required)_ - the attribute you wish to search, can be any attribute
+  - search_type _(required)_ - the type of search to perform - `equals`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between`
+  - search_value _(required)_ - case-sensitive value you wish to search. If the `search_type` is `between` then use an array of two values to search between
+
+  A set of grouped conditions has the following properties:
+
+  - operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+  - conditions _(required)_ - the array of condition objects as described above.
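+To make the grouping semantics concrete, the Body example below selects the records that this hedged TypeScript predicate would accept. The `Dog` shape is assumed for illustration only; Harper evaluates conditions server-side, not with client code like this:
+
+```typescript
+// Illustrative only: how the example's conditions combine logically.
+interface Dog {
+	age: number;
+	weight_lbs: number;
+	adorable?: boolean;
+	lovable?: boolean;
+}
+
+// The top-level operator "and" joins the three conditions; the nested group uses "or".
+const matches = (d: Dog): boolean =>
+	d.age >= 5 && d.age <= 8 && // { "search_type": "between", "search_value": [5, 8] }
+	d.weight_lbs > 40 && // { "search_type": "greater_than", "search_value": 40 }
+	(d.adorable === true || d.lovable === true); // the nested "or" group
+```
+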
### Body

```json
{
-    "operation": "search_by_conditions",
-    "database": "dev",
-    "table": "dog",
-    "operator": "and",
-    "offset": 0,
-    "limit": 10,
-    "sort": {
-        "attribute": "id",
-        "next": {
-            "dog_name": "age",
-            "descending": true
-        }
-    },
-    "get_attributes": [
-        "*"
-    ],
-    "conditions": [
-        {
-            "search_attribute": "age",
-            "search_type": "between",
-            "search_value": [
-                5,
-                8
-            ]
-        },
-        {
-            "search_attribute": "weight_lbs",
-            "search_type": "greater_than",
-            "search_value": 40
-        },
-        {
-            "operator": "or",
-            "conditions": [
-                {
-                    "search_attribute": "adorable",
-                    "search_type": "equals",
-                    "search_value": true
-                },
-                {
-                    "search_attribute": "lovable",
-                    "search_type": "equals",
-                    "search_value": true
-                }
-            ]
-        }
-    ]
+	"operation": "search_by_conditions",
+	"database": "dev",
+	"table": "dog",
+	"operator": "and",
+	"offset": 0,
+	"limit": 10,
+	"sort": {
+		"attribute": "id",
+		"next": {
+			"attribute": "age",
+			"descending": true
+		}
+	},
+	"get_attributes": ["*"],
+	"conditions": [
+		{
+			"search_attribute": "age",
+			"search_type": "between",
+			"search_value": [5, 8]
+		},
+		{
+			"search_attribute": "weight_lbs",
+			"search_type": "greater_than",
+			"search_value": 40
+		},
+		{
+			"operator": "or",
+			"conditions": [
+				{
+					"search_attribute": "adorable",
+					"search_type": "equals",
+					"search_value": true
+				},
+				{
+					"search_attribute": "lovable",
+					"search_type": "equals",
+					"search_value": true
+				}
+			]
+		}
+	]
}
```

@@ -350,60 +321,60 @@ Returns data from a table for one or more matching conditions. This supports gro

```json
[
-    {
-        "__createdtime__": 1620227719791,
-        "__updatedtime__": 1620227719791,
-        "adorable": true,
-        "age": 7,
-        "breed_id": 346,
-        "dog_name": "Harper",
-        "id": 2,
-        "owner_name": "Stephen",
-        "weight_lbs": 55
-    },
-    {
-        "__createdtime__": 1620227719792,
-        "__updatedtime__": 1620227719792,
-        "adorable": true,
-        "age": 7,
-        "breed_id": 348,
-        "dog_name": "Alby",
-        "id": 3,
-        "owner_name": "Kaylan",
-        "weight_lbs": 84
-    },
-    {
-        "__createdtime__": 1620227719792,
-        "__updatedtime__": 1620227719792,
-        "adorable": true,
-        "age": 6,
-        "breed_id": 347,
-        "dog_name": "Billy",
-        "id": 4,
-        "owner_name": "Zach",
-        "weight_lbs": 60
-    },
-    {
-        "__createdtime__": 1620227719792,
-        "__updatedtime__": 1620227719792,
-        "adorable": true,
-        "age": 5,
-        "breed_id": 250,
-        "dog_name": "Gemma",
-        "id": 8,
-        "owner_name": "Stephen",
-        "weight_lbs": 55
-    },
-    {
-        "__createdtime__": 1620227719792,
-        "__updatedtime__": 1620227719792,
-        "adorable": true,
-        "age": 8,
-        "breed_id": 104,
-        "dog_name": "Bode",
-        "id": 11,
-        "owner_name": "Margo",
-        "weight_lbs": 75
-    }
+	{
+		"__createdtime__": 1620227719791,
+		"__updatedtime__": 1620227719791,
+		"adorable": true,
+		"age": 7,
+		"breed_id": 346,
+		"dog_name": "Harper",
+		"id": 2,
+		"owner_name": "Stephen",
+		"weight_lbs": 55
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 7,
+		"breed_id": 348,
+		"dog_name": "Alby",
+		"id": 3,
+		"owner_name": "Kaylan",
+		"weight_lbs": 84
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 6,
+		"breed_id": 347,
+		"dog_name": "Billy",
+		"id": 4,
+		"owner_name": "Zach",
+		"weight_lbs": 60
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 5,
+		"breed_id": 250,
+		"dog_name": "Gemma",
+		"id": 8,
+		"owner_name": "Stephen",
+		"weight_lbs": 55
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 8,
+		"breed_id": 104,
+		"dog_name": "Bode",
+		"id": 11,
+		"owner_name": "Margo",
+		"weight_lbs": 75
+	}
]
```
+ "__createdtime__": 1620227719792, + "__updatedtime__": 1620227719792, + "adorable": true, + "age": 8, + "breed_id": 104, + "dog_name": "Bode", + "id": 11, + "owner_name": "Margo", + "weight_lbs": 75 + } ] ``` diff --git a/docs/developers/operations-api/quickstart-examples.md b/docs/developers/operations-api/quickstart-examples.md index 64149df5..74e46469 100644 --- a/docs/developers/operations-api/quickstart-examples.md +++ b/docs/developers/operations-api/quickstart-examples.md @@ -1,12 +1,12 @@ # Quick Start Examples -Harper recommends utilizing [Harper Applications](../../developers/applications/README.md) for defining databases, tables, and other functionality. However, this guide is a great way to get started using on the Harper Operations API. +Harper recommends utilizing [Harper Applications](../../developers/applications/README.md) for defining databases, tables, and other functionality. However, this guide is a great way to get started using on the Harper Operations API. ## Create dog Table We first need to create a table. Since our company is named after our CEO's dog, lets create a table to store all our employees' dogs. We'll call this table, `dogs`. -Tables in Harper are schema-less, so we don't need to add any attributes other than a primary_key (in pre 4.2 versions this was referred to as the hash_attribute) to create this table. +Tables in Harper are schema-less, so we don't need to add any attributes other than a primary_key (in pre 4.2 versions this was referred to as the hash_attribute) to create this table. Harper does offer a `database` parameter that can be used to hold logical groupings of tables. The parameter is optional and if not provided the operation will default to using a database named `data`. @@ -16,9 +16,9 @@ If you receive an error response, make sure your Basic Authentication user and p ```json { - "operation": "create_table", - "table": "dog", - "primary_key": "id" + "operation": "create_table", + "table": "dog", + "primary_key": "id" } ``` @@ -26,22 +26,23 @@ If you receive an error response, make sure your Basic Authentication user and p ```json { - "message": "table 'data.dog' successfully created." + "message": "table 'data.dog' successfully created." } ``` --- ## Create breed Table + Now that we have a table to store our dog data, we also want to create a table to track known breeds. Just as with the dog table, the only attribute we need to specify is the `primary_key`. ### Body ```json { - "operation": "create_table", - "table": "breed", - "primary_key": "id" + "operation": "create_table", + "table": "breed", + "primary_key": "id" } ``` @@ -49,7 +50,7 @@ Now that we have a table to store our dog data, we also want to create a table t ```json { - "message": "table 'data.breed' successfully created." + "message": "table 'data.breed' successfully created." } ``` @@ -63,18 +64,18 @@ We're ready to add some dog data. Penny is our CTO's pup, so she gets ID 1 or we ```json { - "operation": "insert", - "table": "dog", - "records": [ - { - "id": 1, - "dog_name": "Penny", - "owner_name": "Kyle", - "breed_id": 154, - "age": 7, - "weight_lbs": 38 - } - ] + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38 + } + ] } ``` @@ -82,11 +83,9 @@ We're ready to add some dog data. 
Penny is our CTO's pup, so she gets ID 1 or we ```json { - "message": "inserted 1 of 1 records", - "inserted_hashes": [ - 1 - ], - "skipped_hashes": [] + "message": "inserted 1 of 1 records", + "inserted_hashes": [1], + "skipped_hashes": [] } ``` @@ -100,118 +99,118 @@ Let's add some more Harper doggies! We can add as many dog objects as we want in ```json { - "operation": "insert", - "table": "dog", - "records": [ - { - "id": 2, - "dog_name": "Harper", - "owner_name": "Stephen", - "breed_id": 346, - "age": 7, - "weight_lbs": 55, - "adorable": true - }, - { - "id": 3, - "dog_name": "Alby", - "owner_name": "Kaylan", - "breed_id": 348, - "age": 7, - "weight_lbs": 84, - "adorable": true - }, - { - "id": 4, - "dog_name": "Billy", - "owner_name": "Zach", - "breed_id": 347, - "age": 6, - "weight_lbs": 60, - "adorable": true - }, - { - "id": 5, - "dog_name": "Rose Merry", - "owner_name": "Zach", - "breed_id": 348, - "age": 8, - "weight_lbs": 15, - "adorable": true - }, - { - "id": 6, - "dog_name": "Kato", - "owner_name": "Kyle", - "breed_id": 351, - "age": 6, - "weight_lbs": 32, - "adorable": true - }, - { - "id": 7, - "dog_name": "Simon", - "owner_name": "Fred", - "breed_id": 349, - "age": 3, - "weight_lbs": 35, - "adorable": true - }, - { - "id": 8, - "dog_name": "Gemma", - "owner_name": "Stephen", - "breed_id": 350, - "age": 5, - "weight_lbs": 55, - "adorable": true - }, - { - "id": 9, - "dog_name": "Yeti", - "owner_name": "Jaxon", - "breed_id": 200, - "age": 5, - "weight_lbs": 55, - "adorable": true - }, - { - "id": 10, - "dog_name": "Monkey", - "owner_name": "Aron", - "breed_id": 271, - "age": 7, - "weight_lbs": 35, - "adorable": true - }, - { - "id": 11, - "dog_name": "Bode", - "owner_name": "Margo", - "breed_id": 104, - "age": 8, - "weight_lbs": 75, - "adorable": true - }, - { - "id": 12, - "dog_name": "Tucker", - "owner_name": "David", - "breed_id": 346, - "age": 2, - "weight_lbs": 60, - "adorable": true - }, - { - "id": 13, - "dog_name": "Jagger", - "owner_name": "Margo", - "breed_id": 271, - "age": 7, - "weight_lbs": 35, - "adorable": true - } - ] + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true + }, + { + "id": 12, + 
"dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + } + ] } ``` @@ -219,22 +218,9 @@ Let's add some more Harper doggies! We can add as many dog objects as we want in ```json { - "message": "inserted 12 of 12 records", - "inserted_hashes": [ - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13 - ], - "skipped_hashes": [] + "message": "inserted 12 of 12 records", + "inserted_hashes": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + "skipped_hashes": [] } ``` @@ -250,9 +236,9 @@ Each header in a column will be considered as an attribute, and each row in the ```json { - "operation": "csv_url_load", - "table": "breed", - "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" + "operation": "csv_url_load", + "table": "breed", + "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" } ``` @@ -260,8 +246,8 @@ Each header in a column will be considered as an attribute, and each row in the ```json { - "message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369", - "job_id": "e77d63b9-70d5-499c-960f-6736718a4369" + "message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369", + "job_id": "e77d63b9-70d5-499c-960f-6736718a4369" } ``` @@ -275,14 +261,14 @@ Harper supports NoSQL and SQL commands. We're going to update the dog table to s ```json { - "operation": "update", - "table": "dog", - "records": [ - { - "id": 1, - "dog_name": "Penny B" - } - ] + "operation": "update", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny B" + } + ] } ``` @@ -290,11 +276,9 @@ Harper supports NoSQL and SQL commands. We're going to update the dog table to s ```json { - "message": "updated 1 of 1 records", - "update_hashes": [ - 1 - ], - "skipped_hashes": [] + "message": "updated 1 of 1 records", + "update_hashes": [1], + "skipped_hashes": [] } ``` @@ -308,8 +292,8 @@ Now we're going to use a simple SQL SELECT call to pull Penny's updated data. No ```json { - "operation": "sql", - "sql": "SELECT * FROM data.dog where id = 1" + "operation": "sql", + "sql": "SELECT * FROM data.dog where id = 1" } ``` @@ -317,17 +301,17 @@ Now we're going to use a simple SQL SELECT call to pull Penny's updated data. No ```json [ - { - "owner_name": "Kyle", - "adorable": null, - "breed_id": 154, - "__updatedtime__": 1610749428575, - "dog_name": "Penny B", - "weight_lbs": 38, - "id": 1, - "age": 7, - "__createdtime__": 1610749386566 - } + { + "owner_name": "Kyle", + "adorable": null, + "breed_id": 154, + "__updatedtime__": 1610749428575, + "dog_name": "Penny B", + "weight_lbs": 38, + "id": 1, + "age": 7, + "__createdtime__": 1610749386566 + } ] ``` @@ -341,8 +325,8 @@ Here's a more complex SQL command joining the breed table with the dog table. We ```json { - "operation": "sql", - "sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name" + "operation": "sql", + "sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name" } ``` @@ -350,34 +334,33 @@ Here's a more complex SQL command joining the breed table with the dog table. 
We

```json
[
-    {
-        "id": 4,
-        "dog_name": "Billy",
-        "owner_name": "Zach",
-        "name": "LABRADOR / GREAT DANE MIX",
-        "section": "Mutt"
-    },
-    {
-        "id": 8,
-        "dog_name": "Gemma",
-        "owner_name": "Stephen",
-        "name": "SHORT HAIRED SETTER MIX",
-        "section": "Mutt"
-    },
-    {
-        "id": 2,
-        "dog_name": "Harper",
-        "owner_name": "Stephen",
-        "name": "HUSKY MIX",
-        "section": "Mutt"
-    },
-    {
-        "id": 5,
-        "dog_name": "Rose Merry",
-        "owner_name": "Zach",
-        "name": "TERRIER MIX",
-        "section": "Mutt"
-    }
+	{
+		"id": 4,
+		"dog_name": "Billy",
+		"owner_name": "Zach",
+		"name": "LABRADOR / GREAT DANE MIX",
+		"section": "Mutt"
+	},
+	{
+		"id": 8,
+		"dog_name": "Gemma",
+		"owner_name": "Stephen",
+		"name": "SHORT HAIRED SETTER MIX",
+		"section": "Mutt"
+	},
+	{
+		"id": 2,
+		"dog_name": "Harper",
+		"owner_name": "Stephen",
+		"name": "HUSKY MIX",
+		"section": "Mutt"
+	},
+	{
+		"id": 5,
+		"dog_name": "Rose Merry",
+		"owner_name": "Zach",
+		"name": "TERRIER MIX",
+		"section": "Mutt"
+	}
]
-
```
diff --git a/docs/developers/operations-api/registration.md b/docs/developers/operations-api/registration.md
index bede8fbf..24e38d1a 100644
--- a/docs/developers/operations-api/registration.md
+++ b/docs/developers/operations-api/registration.md
@@ -1,12 +1,13 @@
-# Registration
-
+# Registration

## Registration Info
+
Returns the registration data of the Harper instance.

-* operation _(required)_ - must always be `registration_info`
+- operation _(required)_ - must always be `registration_info`

### Body
+
```json
{
	"operation": "registration_info"
}
```

### Response: 200
+
+```json
+{
+	"registered": true,
+	"version": "4.2.0",
+	"ram_allocation": 2048,
+	"license_expiration_date": "2022-01-15"
+}
+```
+
+---
+
+## Install Usage License
+
+Installs a Harper license for a block of usage. Multiple usage blocks may be installed, and they will be used up sequentially, with the earliest installed blocks used first. A license is installed
+by creating a string that consists of three base64 encoded blocks, separated by dots. The three blocks consist of:
+
+- header: This is a JSON object with two properties:
+  - typ: should be "Harper-License"
+  - alg: should be "EdDSA"
+
+This JSON object should be converted to base64 (conversion from utf-8 to base64) and is the first base64 block.
+
+- license payload: This is a JSON object with properties:
+  - id _(required)_ - A unique id for the license
+  - level _(required)_ - Usage level number
+  - regionId _(required)_ - The region id where this license can be used
+  - reads _(required)_ - The number of allowed reads
+  - readBytes _(required)_ - The number of allowed read bytes
+  - writes _(required)_ - The number of allowed writes
+  - writeBytes _(required)_ - The number of allowed write bytes
+  - realTimeMessages _(required)_ - The number of allowed real-time messages
+  - realTimeBytes _(required)_ - The number of allowed real-time message bytes
+  - cpuTime _(optional)_ - The allowed amount of CPU time consumed by application code
+  - storage _(optional)_ - Maximum amount of storage that may be used
+  - expiration _(required)_ - The date when this block expires, as an ISO date
+
+This JSON object should be converted to base64 (conversion from utf-8 to base64) and is the second base64 block.
+
+For example:
+
+```json
+{
+	"id": "license-717b-4c6c-b69d-b29014054ab7",
+	"level": 2,
+	"regionId": "us-nw-2",
+	"reads": 2000000000,
+	"readBytes": 8000000000000,
+	"writes": 500000000,
+	"writeBytes": 1000000000000,
+	"realTimeMessages": 10000000000,
+	"realTimeBytes": 40000000000000,
+	"cpuTime": 108000,
+	"storage": 400000000000000,
+	"expiration": "2025-07-25T21:17:21.248Z"
+}
+```
+
+- signature: This is the cryptographic signature, signed by Harper, of the first two blocks, separated by a dot, `header.payload`. This is also converted to base64.
+
+The three base64 blocks are combined to form the `license` property value in the operation.
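+
+A hedged sketch of assembling that string in Node follows. The header and payload values are placeholders from the example above, and the signature is issued by Harper over `header.payload`; client code cannot produce it:
+
+```typescript
+// Illustrative assembly of the license string; all values are placeholders.
+const header = { typ: 'Harper-License', alg: 'EdDSA' };
+const payload = {
+	id: 'license-717b-4c6c-b69d-b29014054ab7',
+	level: 2,
+	regionId: 'us-nw-2',
+	reads: 2000000000,
+	readBytes: 8000000000000,
+	writes: 500000000,
+	writeBytes: 1000000000000,
+	realTimeMessages: 10000000000,
+	realTimeBytes: 40000000000000,
+	expiration: '2025-07-25T21:17:21.248Z',
+};
+
+// utf-8 JSON to base64, as described above.
+const toBase64 = (value: object) => Buffer.from(JSON.stringify(value), 'utf-8').toString('base64');
+
+// Placeholder: the real signature comes from Harper, not from this code.
+const signature = 'abc...0123';
+const license = `${toBase64(header)}.${toBase64(payload)}.${signature}`;
+```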
+
+- operation _(required)_ - must always be `install_usage_license`
+- license _(required)_ - the combination of the three blocks, in the form `header.payload.signature`
+
+### Body
+
+```json
+{
+	"operation": "install_usage_license",
+	"license": "abc...0123.abc...0123.abc...0123"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully installed usage license"
+}
+```
+
+---
+
+## Get Usage Licenses
+
+This will retrieve and return all active usage licenses, with counts of how much of each limit has been consumed.
+
+- operation _(required)_ - must always be `get_usage_licenses`
+
### Body
+
```json
{
-    "registered": true,
-    "version": "4.2.0",
-    "ram_allocation": 2048,
-    "license_expiration_date": "2022-01-15"
+	"operation": "get_usage_licenses"
}
```

+### Response: 200
+
+```json
+[
+	{
+		"id": "license-717b-4c6c-b69d-b29014054ab7",
+		"level": 2,
+		"regionId": "us-nw-2",
+		"reads": 2000000000,
+		"usedReads": 1100000000,
+		"readBytes": 8000000000000,
+		"usedReadBytes": 3000000000000,
+		"writes": 500000000,
+		"usedWrites": 300000000,
+		"writeBytes": 1000000000000,
+		"usedWriteBytes": 4300000000000,
+		"realTimeMessages": 10000000000,
+		"usedRealTimeMessages": 2000000000,
+		"realTimeBytes": 40000000000000,
+		"usedRealTimeBytes": 13000000000000,
+		"cpuTime": 108000,
+		"usedCpuTime": 41000,
+		"storage": 400000000000000,
+		"expiration": "2025-07-25T21:17:21.248Z"
+	},
+	{
+		"id": "license-4c6c-b69d-b29014054ab7-717b",
+		"level": 2,
+		"regionId": "us-nw-2",
+		"reads": 2000000000,
+		"usedReads": 0,
+		"readBytes": 8000000000000,
+		"usedReadBytes": 0,
+		"writes": 500000000,
+		"usedWrites": 0,
+		"writeBytes": 1000000000000,
+		"usedWriteBytes": 0,
+		"realTimeMessages": 10000000000,
+		"usedRealTimeMessages": 0,
+		"realTimeBytes": 40000000000000,
+		"usedRealTimeBytes": 0,
+		"cpuTime": 108000,
+		"usedCpuTime": 0,
+		"storage": 400000000000000,
+		"expiration": "2025-09-25T21:17:21.248Z"
+	},
+	{
+		"id": "license-4c6c-b69d-b29014054ab7-717b",
+		"level": 2,
+		"regionId": "us-se-2",
+		... usage licenses for other regions may be in here as well
+]
+```
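+
+Because blocks are consumed sequentially, earliest first, the headroom left across all blocks is the sum of each block's unconsumed allowance. A hedged sketch of that arithmetic (the `UsageLicense` shape mirrors the response fields above; the type name itself is illustrative):
+
+```typescript
+// Illustrative: total the remaining reads/writes across active usage blocks.
+interface UsageLicense {
+	reads: number;
+	usedReads: number;
+	writes: number;
+	usedWrites: number;
+}
+
+function remainingQuota(licenses: UsageLicense[]) {
+	return licenses.reduce(
+		(acc, block) => ({
+			reads: acc.reads + (block.reads - block.usedReads),
+			writes: acc.writes + (block.writes - block.usedWrites),
+		}),
+		{ reads: 0, writes: 0 }
+	);
+}
+```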
---

## Get Fingerprint
+
+(Deprecated)
+
Returns the Harper fingerprint, uniquely generated based on the machine, for licensing purposes.

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `get_fingerprint`
+- operation _(required)_ - must always be `get_fingerprint`

### Body

```json
{
-    "operation": "get_fingerprint"
+	"operation": "get_fingerprint"
}
```

---

## Set License
+
+(Deprecated)
+
Sets the Harper license as generated by Harper License Management software.

_Operation is restricted to super_user roles only_

-* operation _(required)_ - must always be `set_license`
-* key _(required)_ - your license key
-* company _(required)_ - the company that was used in the license
+- operation _(required)_ - must always be `set_license`
+- key _(required)_ - your license key
+- company _(required)_ - the company that was used in the license

### Body

```json
{
-    "operation": "set_license",
-    "key": "",
-    "company": ""
+	"operation": "set_license",
+	"key": "",
+	"company": ""
}
```
-
diff --git a/docs/developers/operations-api/sql-operations.md b/docs/developers/operations-api/sql-operations.md
index abb354b8..1069ce19 100644
--- a/docs/developers/operations-api/sql-operations.md
+++ b/docs/developers/operations-api/sql-operations.md
@@ -2,117 +2,122 @@ Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
{% endhint %}

-# SQL Operations
+# SQL Operations

## Select
+
Executes the provided SQL statement. The SELECT statement is used to query data from the database.

-* operation _(required)_ - must always be `sql`
-* sql _(required)_ - use standard SQL
+- operation _(required)_ - must always be `sql`
+- sql _(required)_ - use standard SQL

### Body

```json
{
-    "operation": "sql",
-    "sql": "SELECT * FROM dev.dog WHERE id = 1"
+	"operation": "sql",
+	"sql": "SELECT * FROM dev.dog WHERE id = 1"
}
```

### Response: 200
+
```json
[
-    {
-        "id": 1,
-        "age": 7,
-        "dog_name": "Penny",
-        "weight_lbs": 38,
-        "breed_id": 154,
-        "owner_name": "Kyle",
-        "adorable": true,
-        "__createdtime__": 1611614106043,
-        "__updatedtime__": 1611614119507
-    }
+	{
+		"id": 1,
+		"age": 7,
+		"dog_name": "Penny",
+		"weight_lbs": 38,
+		"breed_id": 154,
+		"owner_name": "Kyle",
+		"adorable": true,
+		"__createdtime__": 1611614106043,
+		"__updatedtime__": 1611614119507
+	}
]
```

---

## Insert
+
Executes the provided SQL statement. The INSERT statement is used to add one or more rows to a database table.

-* operation _(required)_ - must always be `sql`
-* sql _(required)_ - use standard SQL
+- operation _(required)_ - must always be `sql`
+- sql _(required)_ - use standard SQL

### Body

```json
{
-    "operation": "sql",
-    "sql": "INSERT INTO dev.dog (id, dog_name) VALUE (22, 'Simon')"
+	"operation": "sql",
+	"sql": "INSERT INTO dev.dog (id, dog_name) VALUE (22, 'Simon')"
}
```

### Response: 200
+
```json
{
-    "message": "inserted 1 of 1 records",
-    "inserted_hashes": [
-        22
-    ],
-    "skipped_hashes": []
+	"message": "inserted 1 of 1 records",
+	"inserted_hashes": [22],
+	"skipped_hashes": []
}
```
+
---

## Update
+
Executes the provided SQL statement. The UPDATE statement is used to change the values of specified attributes in one or more rows in a database table.

-* operation _(required)_ - must always be `sql`
-* sql _(required)_ - use standard SQL
+- operation _(required)_ - must always be `sql`
+- sql _(required)_ - use standard SQL

### Body
+
```json
{
-    "operation": "sql",
-    "sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1"
+	"operation": "sql",
+	"sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1"
}
```

### Response: 200
+
```json
{
-    "message": "updated 1 of 1 records",
-    "update_hashes": [
-        1
-    ],
-    "skipped_hashes": []
+	"message": "updated 1 of 1 records",
+	"update_hashes": [1],
+	"skipped_hashes": []
}
```

---

## Delete
+
Executes the provided SQL statement. 
The DELETE statement is used to remove one or more rows of data from a database table. -* operation _(required)_ - must always be `sql` -* sql _(required)_ - use standard SQL +- operation _(required)_ - must always be `sql` +- sql _(required)_ - use standard SQL ### Body + ```json { - "operation": "sql", - "sql": "DELETE FROM dev.dog WHERE id = 1" + "operation": "sql", + "sql": "DELETE FROM dev.dog WHERE id = 1" } ``` ### Response: 200 + ```json { - "message": "1 of 1 record successfully deleted", - "deleted_hashes": [ - 1 - ], - "skipped_hashes": [] + "message": "1 of 1 record successfully deleted", + "deleted_hashes": [1], + "skipped_hashes": [] } ``` diff --git a/docs/developers/operations-api/system-operations.md b/docs/developers/operations-api/system-operations.md index 6a33c1f2..8252c19d 100644 --- a/docs/developers/operations-api/system-operations.md +++ b/docs/developers/operations-api/system-operations.md @@ -1,100 +1,112 @@ # System Operations ## Restart + Restarts the Harper instance. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `restart` +- operation _(required)_ - must always be `restart` ### Body + ```json { - "operation": "restart" + "operation": "restart" } ``` ### Response: 200 + ```json { - "message": "Restarting HarperDB. This may take up to 60 seconds." + "message": "Restarting HarperDB. This may take up to 60 seconds." } ``` --- ## Restart Service + Restarts servers for the specified Harper service. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `restart_service` -* service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering` -* replicated _(optional)_ - must be a boolean. If set to `true`, Harper will replicate the restart service operation across all nodes in the cluster. The restart will occur as a rolling restart, ensuring that each node is fully restarted before the next node begins restarting. +- operation _(required)_ - must always be `restart_service` +- service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering` +- replicated _(optional)_ - must be a boolean. If set to `true`, Harper will replicate the restart service operation across all nodes in the cluster. The restart will occur as a rolling restart, ensuring that each node is fully restarted before the next node begins restarting. ### Body + ```json { - "operation": "restart_service", - "service": "http_workers" + "operation": "restart_service", + "service": "http_workers" } ``` ### Response: 200 + ```json { - "message": "Restarting http_workers" + "message": "Restarting http_workers" } ``` --- ## System Information + Returns detailed metrics on the host system. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `system_information` -* attributes _(optional)_ - string array of top level attributes desired in the response, if no value is supplied all attributes will be returned. Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'metrics', 'threads', 'replication'] +- operation _(required)_ - must always be `system_information` +- attributes _(optional)_ - string array of top level attributes desired in the response, if no value is supplied all attributes will be returned. 
Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'metrics', 'threads', 'replication'] ### Body + ```json { - "operation": "system_information" + "operation": "system_information" } ``` --- ## Set Status + Sets a status value that can be used for application-specific status tracking. Status values are stored in memory and are not persisted across restarts. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `set_status` -* id _(required)_ - the key identifier for the status -* status _(required)_ - the status value to set (string between 1-512 characters) +- operation _(required)_ - must always be `set_status` +- id _(required)_ - the key identifier for the status +- status _(required)_ - the status value to set (string between 1-512 characters) ### Body + ```json { - "operation": "set_status", - "id": "primary", - "status": "active" + "operation": "set_status", + "id": "primary", + "status": "active" } ``` ### Response: 200 + ```json { - "id": "primary", - "status": "active", - "__createdtime__": 1621364589543, - "__updatedtime__": 1621364589543 + "id": "primary", + "status": "active", + "__createdtime__": 1621364589543, + "__updatedtime__": 1621364589543 } ``` ### Notes + - The `id` parameter must be one of the allowed status types: 'primary', 'maintenance', or 'availability' - If no `id` is specified, it defaults to 'primary' - For 'availability' status, only 'Available' or 'Unavailable' values are accepted @@ -103,70 +115,77 @@ _Operation is restricted to super_user roles only_ --- ## Get Status + Retrieves a status value previously set with the set_status operation. _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `get_status` -* id _(optional)_ - the key identifier for the status to retrieve (defaults to all statuses if not provided) +- operation _(required)_ - must always be `get_status` +- id _(optional)_ - the key identifier for the status to retrieve (defaults to all statuses if not provided) ### Body + ```json { - "operation": "get_status", - "id": "primary" + "operation": "get_status", + "id": "primary" } ``` ### Response: 200 + ```json { - "id": "primary", - "status": "active", - "__createdtime__": 1621364589543, - "__updatedtime__": 1621364589543 + "id": "primary", + "status": "active", + "__createdtime__": 1621364589543, + "__updatedtime__": 1621364589543 } ``` If no id parameter is provided, all status values will be returned: + ```json [ - { - "id": "primary", - "status": "active", - "__createdtime__": 1621364589543, - "__updatedtime__": 1621364589543 - }, - { - "id": "maintenance", - "status": "scheduled", - "__createdtime__": 1621364600123, - "__updatedtime__": 1621364600123 - } + { + "id": "primary", + "status": "active", + "__createdtime__": 1621364589543, + "__updatedtime__": 1621364589543 + }, + { + "id": "maintenance", + "status": "scheduled", + "__createdtime__": 1621364600123, + "__updatedtime__": 1621364600123 + } ] ``` --- ## Clear Status + Removes a status entry by its ID. 
_Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `clear_status` -* id _(required)_ - the key identifier for the status to remove +- operation _(required)_ - must always be `clear_status` +- id _(required)_ - the key identifier for the status to remove ### Body + ```json { - "operation": "clear_status", - "id": "primary" + "operation": "clear_status", + "id": "primary" } ``` ### Response: 200 + ```json { - "message": "Status successfully cleared" + "message": "Status successfully cleared" } ``` diff --git a/docs/developers/operations-api/token-authentication.md b/docs/developers/operations-api/token-authentication.md index 5c6770f1..26220051 100644 --- a/docs/developers/operations-api/token-authentication.md +++ b/docs/developers/operations-api/token-authentication.md @@ -1,50 +1,56 @@ -# Token Authentication +# Token Authentication ## Create Authentication Tokens + Creates the tokens needed for authentication: operation & refresh token. _Note - this operation does not require authorization to be set_ -* operation _(required)_ - must always be `create_authentication_tokens` -* username _(required)_ - username of user to generate tokens for -* password _(required)_ - password of user to generate tokens for +- operation _(required)_ - must always be `create_authentication_tokens` +- username _(required)_ - username of user to generate tokens for +- password _(required)_ - password of user to generate tokens for ### Body + ```json { - "operation": "create_authentication_tokens", - "username": "", - "password": "" + "operation": "create_authentication_tokens", + "username": "", + "password": "" } ``` ### Response: 200 + ```json { - "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", - "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" + "operation_token": 
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" } ``` --- ## Refresh Operation Token + This operation creates a new operation token. -* operation _(required)_ - must always be `refresh_operation_token` -* refresh_token _(required)_ - the refresh token that was provided when tokens were created +- operation _(required)_ - must always be `refresh_operation_token` +- refresh*token *(required)\_ - the refresh token that was provided when tokens were created ### Body + ```json { - "operation": "refresh_operation_token", - "refresh_token": "EXISTING_REFRESH_TOKEN" + "operation": "refresh_operation_token", + "refresh_token": "EXISTING_REFRESH_TOKEN" } ``` ### Response: 200 + ```json { - "operation_token": 
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-sEUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg" + "operation_token": 
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-sEUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg" } ``` diff --git a/docs/developers/operations-api/users-and-roles.md b/docs/developers/operations-api/users-and-roles.md index d1b5a41a..0326038a 100644 --- a/docs/developers/operations-api/users-and-roles.md +++ b/docs/developers/operations-api/users-and-roles.md @@ -1,480 +1,504 @@ # Users and Roles ## List Roles + Returns a list of all roles. 
[Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `list_roles` +- operation _(required)_ - must always be `list_roles` ### Body + ```json { - "operation": "list_roles" + "operation": "list_roles" } ``` ### Response: 200 + ```json [ - { - "__createdtime__": 1611615061106, - "__updatedtime__": 1611615061106, - "id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890", - "permission": { - "super_user": false, - "dev": { - "tables": { - "dog": { - "read": true, - "insert": true, - "update": true, - "delete": false, - "attribute_permissions": [ - { - "attribute_name": "name", - "read": true, - "insert": true, - "update": true - } - ] - } - } - } - }, - "role": "developer" - }, - { - "__createdtime__": 1610749235614, - "__updatedtime__": 1610749235614, - "id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564", - "permission": { - "cluster_user": true - }, - "role": "cluster_user" - }, - { - "__createdtime__": 1610749235609, - "__updatedtime__": 1610749235609, - "id": "745b3138-a7cf-455a-8256-ac03722eef12", - "permission": { - "super_user": true - }, - "role": "super_user" - } + { + "__createdtime__": 1611615061106, + "__updatedtime__": 1611615061106, + "id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890", + "permission": { + "super_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": true, + "insert": true, + "update": true + } + ] + } + } + } + }, + "role": "developer" + }, + { + "__createdtime__": 1610749235614, + "__updatedtime__": 1610749235614, + "id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564", + "permission": { + "cluster_user": true + }, + "role": "cluster_user" + }, + { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + } ] ``` --- ## Add Role + Creates a new role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `add_role` -* role _(required)_ - name of role you are defining -* permission _(required)_ - object defining permissions for users associated with this role: - * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. - * structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true. +- operation _(required)_ - must always be `add_role` +- role _(required)_ - name of role you are defining +- permission _(required)_ - object defining permissions for users associated with this role: + - super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. + - structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. 
This overrides any individual table permissions for specified databases, or for all databases if the value is true. ### Body + ```json { - "operation": "add_role", - "role": "developer", - "permission": { - "super_user": false, - "structure_user": false, - "dev": { - "tables": { - "dog": { - "read": true, - "insert": true, - "update": true, - "delete": false, - "attribute_permissions": [ - { - "attribute_name": "name", - "read": true, - "insert": true, - "update": true - } - ] - } - } - } - } + "operation": "add_role", + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": true, + "insert": true, + "update": true + } + ] + } + } + } + } } ``` ### Response: 200 + ```json { - "role": "developer", - "permission": { - "super_user": false, - "structure_user": false, - "dev": { - "tables": { - "dog": { - "read": true, - "insert": true, - "update": true, - "delete": false, - "attribute_permissions": [ - { - "attribute_name": "name", - "read": true, - "insert": true, - "update": true - } - ] - } - } - } - }, - "id": "0a9368b0-bd81-482f-9f5a-8722e3582f96", - "__updatedtime__": 1598549532897, - "__createdtime__": 1598549532897 + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": true, + "insert": true, + "update": true + } + ] + } + } + } + }, + "id": "0a9368b0-bd81-482f-9f5a-8722e3582f96", + "__updatedtime__": 1598549532897, + "__createdtime__": 1598549532897 } ``` --- ## Alter Role + Modifies an existing role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `alter_role` -* id _(required)_ - the id value for the role you are altering -* role _(optional)_ - name value to update on the role you are altering -* permission _(required)_ - object defining permissions for users associated with this role: - * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. - * structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true. +- operation _(required)_ - must always be `alter_role` +- id _(required)_ - the id value for the role you are altering +- role _(optional)_ - name value to update on the role you are altering +- permission _(required)_ - object defining permissions for users associated with this role: + - super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. + - structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. 
If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true. ### Body ```json { - "operation": "alter_role", - "id": "f92162e2-cd17-450c-aae0-372a76859038", - "role": "another_developer", - "permission": { - "super_user": false, - "structure_user": false, - "dev": { - "tables": { - "dog": { - "read": true, - "insert": true, - "update": true, - "delete": false, - "attribute_permissions": [ - { - "attribute_name": "name", - "read": false, - "insert": true, - "update": true - } - ] - } - } - } - } + "operation": "alter_role", + "id": "f92162e2-cd17-450c-aae0-372a76859038", + "role": "another_developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + } } ``` ### Response: 200 + ```json { - "id": "a7cb91e9-32e4-4dbf-a327-fab4fa9191ea", - "role": "developer", - "permission": { - "super_user": false, - "structure_user": false, - "dev": { - "tables": { - "dog": { - "read": true, - "insert": true, - "update": true, - "delete": false, - "attribute_permissions": [ - { - "attribute_name": "name", - "read": false, - "insert": true, - "update": true - } - ] - } - } - } - }, - "__updatedtime__": 1598549996106 + "id": "a7cb91e9-32e4-4dbf-a327-fab4fa9191ea", + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + }, + "__updatedtime__": 1598549996106 } ``` --- ## Drop Role + Deletes an existing role from the database. NOTE: Role with associated users cannot be dropped. [Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - this must always be `drop_role` -* id _(required)_ - this is the id of the role you are dropping +- operation _(required)_ - this must always be `drop_role` +- id _(required)_ - this is the id of the role you are dropping ### Body + ```json { - "operation": "drop_role", - "id": "developer" + "operation": "drop_role", + "id": "developer" } ``` ### Response: 200 + ```json { - "message": "developer successfully deleted" + "message": "developer successfully deleted" } ``` --- ## List Users + Returns a list of all users. 
[Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `list_users` +- operation _(required)_ - must always be `list_users` ### Body + ```json { - "operation": "list_users" + "operation": "list_users" } ``` ### Response: 200 + ```json [ - { - "__createdtime__": 1635520961165, - "__updatedtime__": 1635520961165, - "active": true, - "role": { - "__createdtime__": 1635520961161, - "__updatedtime__": 1635520961161, - "id": "7c78ef13-c1f3-4063-8ea3-725127a78279", - "permission": { - "super_user": true, - "system": { - "tables": { - "hdb_table": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_attribute": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_schema": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_user": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_role": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_job": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_license": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_info": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_nodes": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - }, - "hdb_temp": { - "read": true, - "insert": false, - "update": false, - "delete": false, - "attribute_permissions": [] - } - } - } - }, - "role": "super_user" - }, - "username": "HDB_ADMIN" - } + { + "__createdtime__": 1635520961165, + "__updatedtime__": 1635520961165, + "active": true, + "role": { + "__createdtime__": 1635520961161, + "__updatedtime__": 1635520961161, + "id": "7c78ef13-c1f3-4063-8ea3-725127a78279", + "permission": { + "super_user": true, + "system": { + "tables": { + "hdb_table": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_attribute": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_schema": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_user": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_role": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_job": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_license": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_info": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_nodes": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_temp": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + } + } + } + }, + "role": "super_user" + }, + 
"username": "HDB_ADMIN" + } ] ``` --- ## User Info + Returns user data for the associated user credentials. -* operation _(required)_ - must always be `user_info` +- operation _(required)_ - must always be `user_info` ### Body + ```json { - "operation": "user_info" + "operation": "user_info" } ``` ### Response: 200 + ```json { - "__createdtime__": 1610749235611, - "__updatedtime__": 1610749235611, - "active": true, - "role": { - "__createdtime__": 1610749235609, - "__updatedtime__": 1610749235609, - "id": "745b3138-a7cf-455a-8256-ac03722eef12", - "permission": { - "super_user": true - }, - "role": "super_user" - }, - "username": "HDB_ADMIN" + "__createdtime__": 1610749235611, + "__updatedtime__": 1610749235611, + "active": true, + "role": { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" } ``` --- ## Add User + Creates a new user with the specified role and credentials. [Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `add_user` -* role _(required)_ - 'role' name value of the role you wish to assign to the user. See `add_role` for more detail -* username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash -* password _(required)_ - clear text for password. Harper will encrypt the password upon receipt -* active _(required)_ - boolean value for status of user's access to your Harper instance. If set to false, user will not be able to access your instance of Harper. +- operation _(required)_ - must always be `add_user` +- role _(required)_ - 'role' name value of the role you wish to assign to the user. See `add_role` for more detail +- username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash +- password _(required)_ - clear text for password. Harper will encrypt the password upon receipt +- active _(required)_ - boolean value for status of user's access to your Harper instance. If set to false, user will not be able to access your instance of Harper. ### Body + ```json { - "operation": "add_user", - "role": "role_name", - "username": "hdb_user", - "password": "password", - "active": true + "operation": "add_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true } ``` ### Response: 200 + ```json { - "message": "hdb_user successfully added" + "message": "hdb_user successfully added" } ``` --- ## Alter User + Modifies an existing user's role and/or credentials. [Learn more about Harper roles here.](../security/users-and-roles.md) -_Operation is restricted to super\_user roles only_ +_Operation is restricted to super_user roles only_ - * operation _(required)_ - must always be `alter_user` - * username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash. - * password _(optional)_ - clear text for password. Harper will encrypt the password upon receipt - * role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail - * active _(optional)_ - status of user's access to your Harper instance. See `add_role` for more detail +- operation _(required)_ - must always be `alter_user` +- username _(required)_ - username assigned to the user. 
It can not be altered after adding the user. It serves as the hash. +- password _(optional)_ - clear text for password. Harper will encrypt the password upon receipt +- role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail +- active _(optional)_ - status of user's access to your Harper instance. See `add_role` for more detail ### Body + ```json { - "operation": "alter_user", - "role": "role_name", - "username": "hdb_user", - "password": "password", - "active": true + "operation": "alter_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true } ``` ### Response: 200 + ```json { - "message": "updated 1 of 1 records", - "new_attributes": [], - "txn_time": 1611615114397.988, - "update_hashes": [ - "hdb_user" - ], - "skipped_hashes": [] + "message": "updated 1 of 1 records", + "new_attributes": [], + "txn_time": 1611615114397.988, + "update_hashes": ["hdb_user"], + "skipped_hashes": [] } ``` --- ## Drop User + Deletes an existing user by username. [Learn more about Harper roles here.](../security/users-and-roles.md) _Operation is restricted to super_user roles only_ -* operation _(required)_ - must always be `drop_user` -* username _(required)_ - username assigned to the user +- operation _(required)_ - must always be `drop_user` +- username _(required)_ - username assigned to the user ### Body + ```json { - "operation": "drop_user", - "username": "sgoldberg" + "operation": "drop_user", + "username": "sgoldberg" } ``` ### Response: 200 + ```json { - "message": "sgoldberg successfully deleted" + "message": "sgoldberg successfully deleted" } ``` diff --git a/docs/developers/real-time.md b/docs/developers/real-time.md index 0c711db0..9090bbd5 100644 --- a/docs/developers/real-time.md +++ b/docs/developers/real-time.md @@ -5,9 +5,11 @@ Harper provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time as well as handling data-oriented messaging. Harper supports multiple standardized protocols to facilitate diverse standards-based client interaction. Harper real-time communication is based around database tables. Declared tables are the basis for monitoring data, and defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a Harper application folder](./applications/README.md): + ``` type MyTopic @table @export ``` + You can then subscribe to records or sub-topics in this topic/namespace, as well as save data and publish messages, with the protocols discussed below. ### Content Negotiation @@ -57,6 +59,7 @@ Harper supports QoS 0 and 1 for publishing and subscribing. Harper supports multi-level topics, both for subscribing and publishing. Harper also supports multi-level wildcards, so you can subscribe to /`my-resource/#` to receive notifications for `my-resource/some-id` as well as `my-resource/nested/id`, or you can subscribe to `my-resource/nested/#` and receive the latter, but not the former, topic messages. Harper currently only supports trailing multi-level wildcards (no single-level wildcards with '\*'). #### Events + JavaScript components can also listen for MQTT events. This is available on the server.mqtt.events object. 
For example, to set up a listener/callback for when MQTT clients connect and authorize, we can do: ```javascript @@ -64,11 +67,13 @@ server.mqtt.events.on('connected', (session, socket) => { console.log('client connected with id', session.clientId); }); ``` + The following MQTT events are available: -* `connection` - When a client initially establishes a TCP or WS connection to the server -* `connected` - When a client establishes an authorized MQTT connection -* `auth-failed` - When a client fails to authenticate -* `disconnected` - When a client disconnects from the server + +- `connection` - When a client initially establishes a TCP or WS connection to the server +- `connected` - When a client establishes an authorized MQTT connection +- `auth-failed` - When a client fails to authenticate +- `disconnected` - When a client disconnects from the server ### Ordering @@ -140,7 +145,7 @@ eventSource.onmessage = (event) => { ### MQTT Feature Support Matrix | Feature | Support | -|--------------------------------------------------------------------|----------------------------------------------------------------| +| ------------------------------------------------------------------ | -------------------------------------------------------------- | | Connections, protocol negotiation, and acknowledgement with v3.1.1 | :heavy_check_mark: | | Connections, protocol negotiation, and acknowledgement with v5 | :heavy_check_mark: | | Secure MQTTS | :heavy_check_mark: | diff --git a/docs/developers/replication/README.md b/docs/developers/replication/README.md index 90d85216..0c780123 100644 --- a/docs/developers/replication/README.md +++ b/docs/developers/replication/README.md @@ -33,10 +33,9 @@ You can also use the [operations API](../operations-api/clustering.md) to dynami ```json { - "operation": "add_node", - "hostname": "server-two" + "operation": "add_node", + "hostname": "server-two" } - ``` These operations will also dynamically generate certificates as needed, if there are no existing signed certificates, or if the existing certificates are not valid for the new node. @@ -56,8 +55,8 @@ By default, all tables within a replicated database will be replicated. Transact ```graphql type LocalTableForNode @table(replicate: false) { - id: ID! - name: String! + id: ID! + name: String! } ``` @@ -77,6 +76,7 @@ Harper supports the highest levels of security through public key infrastructure #### Provide your own certificates If you want to secure your Harper connections with your own signed certificates, you can easily do so. Whether you have certificates from a public authority (like Let's Encrypt or Digicert) or a corporate certificate authority, you can use them to authenticate nodes securely. 
You can then allow nodes to authorize each other by checking the certificate against the standard list of root certificate authorities by enabling the `enableRootCAs` option in the config: + ``` replication enableRootCAs: true @@ -121,13 +121,13 @@ Example configuration: ```json { - "operation": "add_node", - "hostname": "server-two", - "verify_tls": false, - "authorization": { - "username": "admin", - "password": "password" - } + "operation": "add_node", + "hostname": "server-two", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password" + } } ``` @@ -135,8 +135,8 @@ When you connect to another node (e.g., `server-two`), Harper uses secure WebSoc If you’re working with a fresh install, you’ll need to set `verify_tls` to `false` temporarily, so the self-signed certificate is accepted. Once the connection is made, Harper will automatically handle the certificate signing process: -* It creates a certificate signing request (CSR), sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA). -* The signed certificate is stored for future connections between the nodes, ensuring secure communication. +- It creates a certificate signing request (CSR), sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA). +- The signed certificate is stored for future connections between the nodes, ensuring secure communication. **Important:** Your credentials are not stored—they are discarded immediately after use. @@ -152,9 +152,9 @@ To utilize the `revoked_certificates` attribute in the `hdb_nodes` table, you ca ```json { - "operation": "update_node", - "hostname": "server-two", - "revoked_certificates": ["1769F7D6A"] + "operation": "update_node", + "hostname": "server-two", + "revoked_certificates": ["1769F7D6A"] } ``` @@ -176,8 +176,8 @@ Nodes can be removed from the cluster using the [`remove_node` operation](../ope ```json { - "operation": "remove_node", - "hostname": "server-two" + "operation": "remove_node", + "hostname": "server-two" } ``` @@ -213,14 +213,16 @@ Example configuration: ```json { - "operation": "add_node", - "hostname": "server-two", - "subscriptions": [{ - "database": "dev", - "table": "my-table", - "publish": true, - "subscribe": false - }] + "operation": "add_node", + "hostname": "server-two", + "subscriptions": [ + { + "database": "dev", + "table": "my-table", + "publish": true, + "subscribe": false + } + ] } ``` @@ -230,14 +232,16 @@ Here we are updating the subscription to receive transactions on the `dev.my-tab ```json { - "operation": "update_node", - "hostname": "server-two", - "subscriptions": [{ - "database": "dev", - "table": "my-table", - "publish": true, - "subscribe": true - }] + "operation": "update_node", + "hostname": "server-two", + "subscriptions": [ + { + "database": "dev", + "table": "my-table", + "publish": true, + "subscribe": true + } + ] } ``` @@ -247,7 +251,7 @@ You can monitor the status of replication through the operations API. 
You can use: ```json { - "operation": "cluster_status" + "operation": "cluster_status" } ``` @@ -259,14 +263,14 @@ You may also specify a `start_time` in the `add_node` to specify that when a dat **Advanced Configuration** -You can also check the configuration of the replication system, including the current known nodes and certificates, by querying the hdb\_nodes and hdb\_certificate table: +You can also check the configuration of the replication system, including the current known nodes and certificates, by querying the hdb_nodes and hdb_certificate table: ```json { - "operation": "search_by_value", - "database": "system", - "table": "hdb_nodes", - "search_attribute": "name", - "search_value": "*" + "operation": "search_by_value", + "database": "system", + "table": "hdb_nodes", + "search_attribute": "name", + "search_value": "*" } ``` diff --git a/docs/developers/replication/sharding.md b/docs/developers/replication/sharding.md index cfd9edd7..7e14dee6 100644 --- a/docs/developers/replication/sharding.md +++ b/docs/developers/replication/sharding.md @@ -5,17 +5,22 @@ There are two main ways to setup sharding in Harper. The approach is to use dyna The second approach is to define specific shards, where each node is assigned to a specific shard, and each record is replicated to the nodes in that shard based on the primary key, regardless of where the data was written or accessed, or content. This approach is more static, but can be more efficient for certain use cases, and means that the location of data can always be predictably determined based on the primary key. ## Configuration For Dynamic Sharding + By default, Harper will replicate all data to all nodes. However, replication can easily be configured for "sharding", or storing different data in different locations or nodes. The simplest way to configure sharding and limit replication to improve performance and efficiency is to configure a replication-to count. This will limit the number of nodes that data is replicated to. For example, to specify that writes should replicate to 2 other nodes besides the node that first stored the data, you can set the `replicateTo` to 2 in the `replication` section of the `harperdb-config.yaml` file: + ```yaml replication: replicateTo: 2 ``` + This will ensure that data is replicated to two other nodes, so that each record will be stored on three nodes in total. -With a sharding configuration (or customization below) in place, requests will for records that don't reside on the server handling requests will automatically be forwarded to the appropriate node. This will be done transparently, so that the client will not need to know where the data is stored. +With a sharding configuration (or customization below) in place, requests for records that don't reside on the server handling the request will automatically be forwarded to the appropriate node. This will be done transparently, so that the client will not need to know where the data is stored. ## Replication Control with Headers -With the REST interface, replication levels and destinations can also specified with the `X-Replicate-To` header. This can be used to indicate the number of additional nodes that data should be replicated to, or to specify the nodes that data should be replicated to. The `X-Replicate-To` header can be used with the `POST` and `PUT` methods. This header can also specify if the response should wait for confirmation from other nodes, and how many, with the `confirm` parameter. 
For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following header: + +With the REST interface, replication levels and destinations can also be specified with the `X-Replicate-To` header. This can be used to indicate the number of additional nodes that data should be replicated to, or to specify the nodes that data should be replicated to. The `X-Replicate-To` header can be used with the `POST` and `PUT` methods. This header can also specify if the response should wait for confirmation from other nodes, and how many, with the `confirm` parameter. For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following header: + ```http PUT /MyTable/3 X-Replicate-To: 2;confirm=1 @@ -24,35 +29,44 @@ X-Replicate-To: 2;confirm=1 ``` You can also explicitly specify destination nodes by providing a comma-separated list of node hostnames. For example, to specify that data should be replicated to nodes `node1` and `node2`, you can use the following header: + ```http PUT /MyTable/3 X-Replicate-To: node1,node2 ``` + (This can also be used with the `confirm` parameter.) ## Replication Control with Operations + Likewise, you can specify replicateTo and confirm parameters in the operation object when using the Harper API. For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following operation object: + ```json { - "operation": "update", - "schema": "dev", - "table": "MyTable", - "hashValues": [3], - "record": { - "name": "John Doe" - }, - "replicateTo": 2, - "replicatedConfirmation": 1 + "operation": "update", + "schema": "dev", + "table": "MyTable", + "hashValues": [3], + "record": { + "name": "John Doe" + }, + "replicateTo": 2, + "replicatedConfirmation": 1 } ``` + or you can specify nodes: + ```json ..., "replicateTo": ["node-1", "node-2"] ... ``` + ## Programmatic Replication Control + Additionally, you can specify `replicateTo` and `replicatedConfirmation` parameters programmatically in the context of a resource. For example, you can define a put method: + ```javascript class MyTable extends tables.MyTable { put(record) { @@ -65,12 +79,16 @@ class MyTable extends tables.MyTable { ``` ## Configuration for Static Sharding + Alternatively, you can configure static sharding, where each node is assigned to a specific shard, and each record is replicated to the nodes in that shard based on the primary key. The `shard` is identified by a number. To configure the shard for each node, you can specify the shard number in the `replication`'s `shard` in the configuration: + ```yaml replication: shard: 1 ``` -Alternatively, you can configure the `shard` under the `replication` `routes`. This allows you to assign a specific shard id based on the routing configuration. + +Alternatively, you can configure the `shard` under the `replication` `routes`. This allows you to assign a specific shard id based on the routing configuration. + ```yaml replication: routes: @@ -79,52 +97,64 @@ replication: - hostname: node2 shard: 2 ``` + Or you can specify a `shard` number by including that property in an `add_node` operation or `set_node` operation, to dynamically assign a node to a shard. 
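To make that concrete, here is a minimal sketch of registering a node directly into a shard through the operations API (the endpoint, credentials, and the hostname `server-three` are illustrative assumptions; `add_node` and the `shard` property are as described above):

```javascript
// Sketch: add a node and pin it to shard 2 in a single add_node operation.
// The URL, credentials, and hostname are example values only.
await fetch('http://localhost:9925', {
	method: 'POST',
	headers: {
		'Content-Type': 'application/json',
		'Authorization': 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64'),
	},
	body: JSON.stringify({
		operation: 'add_node',
		hostname: 'server-three',
		shard: 2,
	}),
});
```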
-You can then specify shard number in the `setResidency` or `setResidencyById` functions below. +You can then specify shard number in the `setResidency` or `setResidencyById` functions below. ## Custom Sharding -You can also define a custom sharding strategy by specifying a function to compute the "residency" or location of where records should be stored and reside. To do this we use the `setResidency` method, providing a function that will determine the residency of each record. The function you provide will be called with the record entry, and should return an array of nodes that the record should be replicated to (using their hostname). For example, to shard records based on the value of the `id` field, you can use the following code: + +You can also define a custom sharding strategy by specifying a function to compute the "residency" or location of where records should be stored and reside. To do this we use the `setResidency` method, providing a function that will determine the residency of each record. The function you provide will be called with the record entry, and should return an array of nodes that the record should be replicated to (using their hostname). For example, to shard records based on the value of the `id` field, you can use the following code: + ```javascript MyTable.setResidency((record) => { - return record.id % 2 === 0 ? ['node1'] : ['node2']; + return record.id % 2 === 0 ? ['node1'] : ['node2']; }); ``` + With this approach, the record metadata, which includes the residency information, and any indexed properties, will be replicated to all nodes, but the full record will only be replicated to the nodes specified by the residency function. The `setResidency` function can alternately return a shard number, which will replicate the data to all the nodes in that shard: + ```javascript MyTable.setResidency((record) => { - return record.id % 2 === 0 ? 1 : 2; + return record.id % 2 === 0 ? 1 : 2; }); ``` ### Custom Sharding By Primary Key -Alternately you can define a custom sharding strategy based on the primary key alone. This allows records to be retrieved without needing access to the record data or metadata. With this approach, data will only be replicated to the nodes specified by the residency function (the record metadata doesn't need to replicated to all nodes). To do this, you can use the `setResidencyById` method, providing a function that will determine the residency or shard of each record based on the primary key. The function you provide will be called with the primary key, and should return a `shard` number or an array of nodes that the record should be replicated to (using their hostname). For example, to shard records based on the value of the primary key, you can use the following code: + +Alternately you can define a custom sharding strategy based on the primary key alone. This allows records to be retrieved without needing access to the record data or metadata. With this approach, data will only be replicated to the nodes specified by the residency function (the record metadata doesn't need to be replicated to all nodes). To do this, you can use the `setResidencyById` method, providing a function that will determine the residency or shard of each record based on the primary key. The function you provide will be called with the primary key, and should return a `shard` number or an array of nodes that the record should be replicated to (using their hostname). 
For example, to shard records based on the value of the primary key, you can use the following code: ```javascript MyTable.setResidencyById((id) => { - return id % 2 === 0 ? 1 : 2; // return shard number + return id % 2 === 0 ? 1 : 2; // return shard number }); ``` + or + ```javascript MyTable.setResidencyById((id) => { - return id % 2 === 0 ? ['node1'] : ['node2']; // return array of node hostnames + return id % 2 === 0 ? ['node1'] : ['node2']; // return array of node hostnames }); ``` ### Disabling Cross-Node Access -Normally sharding allows data to be stored in specific nodes, but still allows access to the data from any node. However, you can also disable cross-node access so that data is only returned if is stored on the node where it is accessed. To do this, you can set the `replicateFrom` property on the context of operation to `false`: + +Normally sharding allows data to be stored in specific nodes, but still allows access to the data from any node. However, you can also disable cross-node access so that data is only returned if it is stored on the node where it is accessed. To do this, you can set the `replicateFrom` property on the context of operation to `false`: + ```json { - "operation": "search_by_id", - "table": "MyTable", - "ids": [3], - "replicateFrom": false + "operation": "search_by_id", + "table": "MyTable", + "ids": [3], + "replicateFrom": false } ``` + Or use a header with the REST API: + ```http GET /MyTable/3 X-Replicate-From: none diff --git a/docs/developers/rest.md b/docs/developers/rest.md index 9150d9f3..c40a8c9a 100644 --- a/docs/developers/rest.md +++ b/docs/developers/rest.md @@ -8,11 +8,11 @@ Resources, including tables, can be configured as RESTful endpoints. Make sure y The default path structure provides access to resources at several levels: -* `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table). -* `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records. -* `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records. -* `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix. -* `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments. +- `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table). +- `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records. +- `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records. +- `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix. +- `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments. 
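To make these path levels concrete, here is a minimal sketch of a request against each of them (assuming a table exported as `Product`, as in the query examples later on this page, and Harper's default HTTP port; both may differ in your deployment):

```javascript
// Sketch: one request per path level described above. The base URL is an assumption.
const base = 'http://localhost:9926';

const description = await (await fetch(`${base}/Product`)).json(); // resource description
const record = await (await fetch(`${base}/Product/123`)).json(); // a single record by id
const results = await (await fetch(`${base}/Product/?rating=5`)).json(); // a collection query
```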
### GET @@ -147,17 +147,17 @@ Note that some HTTP clients may be overly aggressive in encoding query parameter Here is a full list of the supported FIQL-style operators/comparators: -* `==`: equal -* `=lt=`: less than -* `=le=`: less than or equal -* `=gt=`: greater than -* `=ge=`: greater than or equal -* `=ne=`, !=: not equal -* `=ct=`: contains the value (for strings) -* `=sw=`, `==*`: starts with the value (for strings) -* `=ew=`: ends with the value (for strings) -* `=`, `===`: strict equality (no type conversion) -* `!==`: strict inequality (no type conversion) +- `==`: equal +- `=lt=`: less than +- `=le=`: less than or equal +- `=gt=`: greater than +- `=ge=`: greater than or equal +- `=ne=`, `!=`: not equal +- `=ct=`: contains the value (for strings) +- `=sw=`, `==*`: starts with the value (for strings) +- `=ew=`: ends with the value (for strings) +- `=`, `===`: strict equality (no type conversion) +- `!==`: strict inequality (no type conversion) #### Unions @@ -184,7 +184,7 @@ GET /Product/?rating=5&[tag=fast|tag=scalable|tag=efficient] And the tags could be safely generated from user inputs in a tag array like: ```javascript -let url = `/Product/?rating=5[${tags.map(encodeURIComponent).join('|')}]` +let url = `/Product/?rating=5[${tags.map(encodeURIComponent).join('|')}]`; ``` More complex queries can be created by further nesting groups: @@ -201,11 +201,11 @@ Harper has several special query functions that use "call" syntax. These can be This function allows you to specify which properties should be included in the responses. This takes several forms: -* `?select(property)`: This will return the values of the specified property directly in the response (will not be put in an object). -* `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties. -* `?select([property1,property2,...])`: This returns the records as arrays of the property values in the specified properties. -* `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property. -* `?select(property{subProperty1,subProperty2{subSubProperty,..}},...)`: This can be used to specify which sub-properties should be included in nested objects and joined/references records. +- `?select(property)`: This will return the values of the specified property directly in the response (will not be put in an object). +- `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties. +- `?select([property1,property2,...])`: This returns the records as arrays of the property values in the specified properties. +- `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property. +- `?select(property{subProperty1,subProperty2{subSubProperty,..}},...)`: This can be used to specify which sub-properties should be included in nested objects and joined/referenced records. To get a list of product names with a category of software: @@ -330,20 +330,20 @@ PUT /Product/123 Content-Type: application/json { "id": "123", "resellerIds": ["first-reseller-id", "second-reseller-id", "last-reseller-id"], -...} +...} ``` #### Type Conversion Query parameters are simply text, so there are several features for converting parameter values to properly typed values for performing correct searches. 
For the FIQL comparators, which include `==`, `!=`, `=gt=`, `=lt=`, `=ge=`, `=le=`, the parser will perform type conversion, according to the following rules: -* `name==null`: Will convert the value to `null` for searching. -* `name==123`: Will convert the value to a number _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`). -* `name==true`: Will convert the value to a boolean _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`). -* `name==number:123`: Will explicitly convert the value after "number:" to a number. -* `name==boolean:true`: Will explicitly convert the value after "boolean:" to a boolean. -* `name==string:some%20text`: Will explicitly keep the value after "string:" as a string (and perform URL component decoding) -* `name==date:2024-01-05T20%3A07%3A27.955Z`: Will explicitly convert the value after "date:" to a Date object. +- `name==null`: Will convert the value to `null` for searching. +- `name==123`: Will convert the value to a number _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`). +- `name==true`: Will convert the value to a boolean _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`). +- `name==number:123`: Will explicitly convert the value after "number:" to a number. +- `name==boolean:true`: Will explicitly convert the value after "boolean:" to a boolean. +- `name==string:some%20text`: Will explicitly keep the value after "string:" as a string (and perform URL component decoding) +- `name==date:2024-01-05T20%3A07%3A27.955Z`: Will explicitly convert the value after "date:" to a Date object. If the attribute specifies a type (like `Float`) in the schema definition, the value will always be converted to the specified type before searching. diff --git a/docs/developers/security/README.md b/docs/developers/security/README.md index 3e0cf039..9e3b3b4a 100644 --- a/docs/developers/security/README.md +++ b/docs/developers/security/README.md @@ -2,8 +2,8 @@ Harper uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases. -* [JWT Authentication](jwt-auth.md) -* [Basic Authentication](basic-auth.md) -* [mTLS Authentication](mtls-auth.md) -* [Configuration](configuration.md) -* [Users and Roles](users-and-roles.md) +- [JWT Authentication](jwt-auth.md) +- [Basic Authentication](basic-auth.md) +- [mTLS Authentication](mtls-auth.md) +- [Configuration](configuration.md) +- [Users and Roles](users-and-roles.md) diff --git a/docs/developers/security/basic-auth.md b/docs/developers/security/basic-auth.md index df884e26..83b1746e 100644 --- a/docs/developers/security/basic-auth.md +++ b/docs/developers/security/basic-auth.md @@ -2,7 +2,7 @@ Harper uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request. -\*\* _**You do not need to log in separately. 
Basic Auth is added to each HTTP request like create\_database, create\_table, insert etc… via headers.**_ \*\* +\*\* _**You do not need to log in separately. Basic Auth is added to each HTTP request like create_database, create_table, insert etc… via headers.**_ \*\* A header is added to each HTTP request. The header key is **“Authorization”**, the header value is **“Basic <\>”** @@ -13,46 +13,41 @@ In the below code sample, you can see where we add the authorization header to t _Note: This function uses btoa. Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._ ```javascript -function callHarperDB(call_object, operation, callback){ - - const options = { - "method": "POST", - "hostname": call_object.endpoint_url, - "port": call_object.endpoint_port, - "path": "/", - "headers": { - "content-type": "application/json", - "authorization": "Basic " + btoa(call_object.username + ':' + call_object.password), - "cache-control": "no-cache" - - } - }; - - const http_req = http.request(options, function (hdb_res) { - let chunks = []; - - hdb_res.on("data", function (chunk) { - chunks.push(chunk); - }); - - hdb_res.on("end", function () { - const body = Buffer.concat(chunks); - if (isJson(body)) { - return callback(null, JSON.parse(body)); - } else { - return callback(body, null); - - } - - }); - }); - - http_req.on("error", function (chunk) { - return callback("Failed to connect", null); - }); - - http_req.write(JSON.stringify(operation)); - http_req.end(); - +function callHarperDB(call_object, operation, callback) { + const options = { + method: 'POST', + hostname: call_object.endpoint_url, + port: call_object.endpoint_port, + path: '/', + headers: { + 'content-type': 'application/json', + 'authorization': 'Basic ' + btoa(call_object.username + ':' + call_object.password), + 'cache-control': 'no-cache', + }, + }; + + const http_req = http.request(options, function (hdb_res) { + let chunks = []; + + hdb_res.on('data', function (chunk) { + chunks.push(chunk); + }); + + hdb_res.on('end', function () { + const body = Buffer.concat(chunks); + if (isJson(body)) { + return callback(null, JSON.parse(body)); + } else { + return callback(body, null); + } + }); + }); + + http_req.on('error', function (chunk) { + return callback('Failed to connect', null); + }); + + http_req.write(JSON.stringify(operation)); + http_req.end(); } ``` diff --git a/docs/developers/security/configuration.md b/docs/developers/security/configuration.md index c146fe4c..3debfeb3 100644 --- a/docs/developers/security/configuration.md +++ b/docs/developers/security/configuration.md @@ -4,18 +4,19 @@ Harper was set up to require very minimal configuration to work out of the box. ## CORS -Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access\_control\_CORS). By default, Harper enables CORS for all domains if you need to disable CORS completely or set up an access list of domains you can do the following: +Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, Harper enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following: 1. Open the harperdb-config.yaml file, which can be found in \, the location you specified during install. 2. In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: cors and corsAccessList. - * `cors` + - `cors` 1. 
     1. To turn off, change to: `cors: false`
     2. To turn on, change to: `cors: true`
-   * `corsAccessList`
+   - `corsAccessList`
     1. The `corsAccessList` will only be recognized by the system when `cors` is `true`.
-    2. To create an access list you set `corsAccessList` to a comma-separated list of domains.
+    2. To create an access list, set `corsAccessList` to a comma-separated list of domains.
+
+       i.e. `corsAccessList` is `http://harpersystems.dev,http://products.harpersystems.dev`

-       i.e. `corsAccessList` is `http://harpersystems.dev,http://products.harpersystems.dev`
    3. To clear out the access list and allow all domains: `corsAccessList` is `[null]`

## SSL
diff --git a/docs/developers/security/jwt-auth.md b/docs/developers/security/jwt-auth.md
index 1fb527a7..0b5c4de9 100644
--- a/docs/developers/security/jwt-auth.md
+++ b/docs/developers/security/jwt-auth.md
@@ -4,8 +4,8 @@

Harper uses token-based authentication with JSON Web Tokens (JWTs). This consists of two primary operations, `create_authentication_tokens` and `refresh_operation_token`. These generate two types of tokens, as follows:

-* The `operation_token` which is used to authenticate all Harper operations in the Bearer Token Authorization Header. The default expiry is one day.
-* The `refresh_token` which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization Header for the `refresh_operation_token` operation only. The default expiry is thirty days.
+- The `operation_token`, which is used to authenticate all Harper operations in the Bearer Token Authorization Header. The default expiry is one day.
+- The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization Header for the `refresh_operation_token` operation only. The default expiry is thirty days.

The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost.

@@ -15,9 +15,9 @@ Users must initially create tokens using their Harper credentials.
The following ```json { - "operation": "create_authentication_tokens", - "username": "username", - "password": "password" + "operation": "create_authentication_tokens", + "username": "username", + "password": "password" } ``` @@ -37,8 +37,8 @@ An example expected return object is: ```json { - "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4", - "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60" + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4", + "refresh_token": 
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60" } ``` @@ -86,7 +86,7 @@ The `refresh_token` also expires at a set interval, but a longer interval. Once Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration.md) with the following parameters: -* `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation\_token expires (default 1d). -* `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh\_token expires (default 30d). +- `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation_token expires (default 1d). +- `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh_token expires (default 30d). A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms). diff --git a/docs/developers/security/mtls-auth.md b/docs/developers/security/mtls-auth.md index 4504f372..49bf17b9 100644 --- a/docs/developers/security/mtls-auth.md +++ b/docs/developers/security/mtls-auth.md @@ -1,3 +1,3 @@ # mTLS Authentication -Harper supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration.md#http) the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. The [HTTP config settings](../../deployments/configuration.md#http) allow you to determine if mTLS is required for all connections or optional. \ No newline at end of file +Harper supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration.md#http) the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. The [HTTP config settings](../../deployments/configuration.md#http) allow you to determine if mTLS is required for all connections or optional. 
diff --git a/docs/developers/security/users-and-roles.md b/docs/developers/security/users-and-roles.md
index d83469d6..4d045f1e 100644
--- a/docs/developers/security/users-and-roles.md
+++ b/docs/developers/security/users-and-roles.md
@@ -15,65 +15,66 @@ Role permissions in Harper are broken into two categories – permissions around

**Built-In Roles**

-There are three built-in roles within Harper. See full breakdown of operations restricted to only super\_user roles [here](users-and-roles.md#Role-Based-Operation-Restrictions).
+There are three built-in roles within Harper. See the full breakdown of operations restricted to super_user roles [here](users-and-roles.md#Role-Based-Operation-Restrictions).

-* `super_user` - This role provides full access to all operations and methods within a Harper instance, this can be considered the admin role.
-  * This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
-* `cluster_user` - This role is an internal system role type that is managed internally to allow clustered instances to communicate with one another.
-  * This role is an internally managed role to facilitate communication between clustered instances.
-* `structure_user` - This role provides specific access for creation and deletion of data.
-  * When defining this role type you can either assign a value of true which will allow the role to create and drop databases & tables. Alternatively the role type can be assigned a string array. The values in this array are databases and allows the role to only create and drop tables in the designated databases.
+- `super_user` - This role provides full access to all operations and methods within a Harper instance; it can be considered the admin role.
+  - This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
+- `cluster_user` - This role is an internal system role type that allows clustered instances to communicate with one another.
+  - It is managed internally to facilitate communication between clustered instances.
+- `structure_user` - This role provides specific access for creation and deletion of data.
+  - When defining this role type, you can either assign a value of `true`, which allows the role to create and drop databases & tables, or assign a string array. The values in this array are database names and allow the role to create and drop tables only in the designated databases.

**User-Defined Roles**

-In addition to built-in roles, admins (i.e. users assigned to the super\_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.
+In addition to built-in roles, admins (i.e. users assigned to the super_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.

-* Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
-* Describe operations will return metadata for all databases, tables, and attributes that a user-defined role has CRUD permissions for.
+- Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
+- Describe operations will return metadata for all databases, tables, and attributes that a user-defined role has CRUD permissions for.

**Role Permissions**

When creating a new, user-defined role in a Harper instance, you must provide a role name and the permissions to assign to that role. _Reminder: only super users can create and manage roles._

-* `role` name used to easily identify the role assigned to individual users.
+- `role` name used to easily identify the role assigned to individual users.

-  _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role` , `alter_role`, or `list_roles` operation._
-* `permissions` used to explicitly define CRUD access to existing table data.
+  _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation._
+
+- `permissions` used to explicitly define CRUD access to existing table data.

Example JSON for an `add_role` request:

```json
{
-  "operation":"add_role",
-  "role":"software_developer",
-  "permission":{
-    "super_user":false,
-    "database_name":{
-      "tables": {
-        "table_name1": {
-          "read":true,
-          "insert":true,
-          "update":true,
-          "delete":false,
-          "attribute_permissions":[
-            {
-              "attribute_name":"attribute1",
-              "read":true,
-              "insert":true,
-              "update":true
-            }
-          ]
-        },
-        "table_name2": {
-          "read":true,
-          "insert":true,
-          "update":true,
-          "delete":false,
-          "attribute_permissions":[]
-        }
-      }
-    }
-  }
+  "operation": "add_role",
+  "role": "software_developer",
+  "permission": {
+    "super_user": false,
+    "database_name": {
+      "tables": {
+        "table_name1": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "attribute1",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        },
+        "table_name2": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": []
+        }
+      }
+    }
+  }
}
```

@@ -81,12 +82,13 @@ Example JSON for `add_role` request

There are two parts to a permissions set:

-* `super_user` – boolean value indicating if role should be provided super\_user access.
+- `super_user` – boolean value indicating if the role should be provided super_user access.
+
+  _If `super_user` is set to true, there should be no additional database-specific permissions values included since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within Harper, but ignored, as super_users have full access to the database._

- _If `super_user` is set to true, there should be no additional database-specific permissions values included since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within Harper, but ignored, as super\_users have full access to the database._
-* `permissions`: Database tables that a role should have specific CRUD access to should be included in the final, database-specific `permissions` JSON.
+- `permissions`: Database tables that a role should have specific CRUD access to should be included in the final, database-specific `permissions` JSON.

- _For user-defined roles (i.e. non-super\_user roles, blank permissions will result in the user being restricted from accessing any of the database schema._
+  _For user-defined roles (i.e. non-super_user roles), blank permissions will result in the user being restricted from accessing any of the database schema._

**Table Permissions JSON**

@@ -118,141 +120,144 @@ Each table that a role should be given some level of CRUD permissions to must be

**Important Notes About Attribute Permissions**

1. If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array.
-2. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.
+2. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.
+
+   _See table_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._
+
+3. If attribute-level permissions are set – _i.e. attribute_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).
+
+   _See table_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert and update ‘attribute1’ and the table’s hash attribute but no other attributes on that table._

- _See table\_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute\_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._
-3. If attribute-level permissions are set – _i.e. attribute\_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have not CRUD access (with the exception of the `hash_attribute` described in #2).
+4. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.

- _See table\_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert and update ‘attribute1’ and the table’s hash attribute but no other attributes on that table._
-4. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.
+   _See table_name2’s permission set for an example of this._

- _See table\_name2’s permission set for an example of this._
5. The `__createdtime__` and `__updatedtime__` attributes that Harper manages internally can have read permissions set, but if set, all other attribute-level permissions will be ignored.
6. Please note that DELETE permissions are not included as a part of an individual attribute-level permission set. That is because it is not possible to delete individual attributes from a row; rows must be deleted in full.
-   * If a role needs the ability to delete rows from a table, that permission should be set on the table-level.
-   * The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement.
+ - If a role needs the ability to delete rows from a table, that permission should be set on the table-level. + - The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement. ## Role-Based Operation Restrictions -The table below includes all API operations available in Harper and indicates whether or not the operation is restricted to super\_user roles. - -_Keep in mind that non-super\_user roles will also be restricted within the operations they do have access to by the database-level CRUD permissions set for the roles._ - -| Databases and Tables | Restricted to Super\_Users | -|----------------------| :------------------------: | -| describe\_all | | -| describe\_database | | -| describe\_table | | -| create\_database | X | -| drop\_database | X | -| create\_table | X | -| drop\_table | X | -| create\_attribute | | -| drop\_attribute | X | - -| NoSQL Operations | Restricted to Super\_Users | -| ---------------------- | :------------------------: | -| insert | | -| update | | -| upsert | | -| delete | | -| search\_by\_hash | | -| search\_by\_value | | -| search\_by\_conditions | | - -| SQL Operations | Restricted to Super\_Users | -| -------------- | :------------------------: | -| select | | -| insert | | -| update | | -| delete | | - -| Bulk Operations | Restricted to Super\_Users | -| ---------------- | :------------------------: | -| csv\_data\_load | | -| csv\_file\_load | | -| csv\_url\_load | | -| import\_from\_s3 | | - -| Users and Roles | Restricted to Super\_Users | -| --------------- | :------------------------: | -| list\_roles | X | -| add\_role | X | -| alter\_role | X | -| drop\_role | X | -| list\_users | X | -| user\_info | | -| add\_user | X | -| alter\_user | X | -| drop\_user | X | - -| Clustering | Restricted to Super\_Users | -| ----------------------- | :------------------------: | -| cluster\_set\_routes | X | -| cluster\_get\_routes | X | -| cluster\_delete\_routes | X | -| add\_node | X | -| update\_node | X | -| cluster\_status | X | -| remove\_node | X | -| configure\_cluster | X | - -| Components | Restricted to Super\_Users | -| -------------------- | :------------------------: | -| get\_components | X | -| get\_component\_file | X | -| set\_component\_file | X | -| drop\_component | X | -| add\_component | X | -| package\_component | X | -| deploy\_component | X | - -| Custom Functions | Restricted to Super\_Users | -| ---------------------------------- | :------------------------: | -| custom\_functions\_status | X | -| get\_custom\_functions | X | -| get\_custom\_function | X | -| set\_custom\_function | X | -| drop\_custom\_function | X | -| add\_custom\_function\_project | X | -| drop\_custom\_function\_project | X | -| package\_custom\_function\_project | X | -| deploy\_custom\_function\_project | X | - -| Registration | Restricted to Super\_Users | -| ------------------ | :------------------------: | -| registration\_info | | -| get\_fingerprint | X | -| set\_license | X | - -| Jobs | Restricted to Super\_Users | -| ----------------------------- | :------------------------: | -| get\_job | | -| search\_jobs\_by\_start\_date | X | - -| Logs | Restricted to Super\_Users | -| --------------------------------- | :------------------------: | -| read\_log | X | -| read\_transaction\_log | X | -| delete\_transaction\_logs\_before | X | -| read\_audit\_log | X | -| delete\_audit\_logs\_before | X | - -| Utilities | Restricted to Super\_Users | -| ----------------------- | 
:------------------------: | -| delete\_records\_before | X | -| export\_local | X | -| export\_to\_s3 | X | -| system\_information | X | -| restart | X | -| restart\_service | X | -| get\_configuration | X | -| configure\_cluster | X | - -| Token Authentication | Restricted to Super\_Users | -| ------------------------------ | :------------------------: | -| create\_authentication\_tokens | | -| refresh\_operation\_token | | +The table below includes all API operations available in Harper and indicates whether or not the operation is restricted to super_user roles. + +_Keep in mind that non-super_user roles will also be restricted within the operations they do have access to by the database-level CRUD permissions set for the roles._ + +| Databases and Tables | Restricted to Super_Users | +| -------------------- | :-----------------------: | +| describe_all | | +| describe_database | | +| describe_table | | +| create_database | X | +| drop_database | X | +| create_table | X | +| drop_table | X | +| create_attribute | | +| drop_attribute | X | + +| NoSQL Operations | Restricted to Super_Users | +| -------------------- | :-----------------------: | +| insert | | +| update | | +| upsert | | +| delete | | +| search_by_hash | | +| search_by_value | | +| search_by_conditions | | + +| SQL Operations | Restricted to Super_Users | +| -------------- | :-----------------------: | +| select | | +| insert | | +| update | | +| delete | | + +| Bulk Operations | Restricted to Super_Users | +| --------------- | :-----------------------: | +| csv_data_load | | +| csv_file_load | | +| csv_url_load | | +| import_from_s3 | | + +| Users and Roles | Restricted to Super_Users | +| --------------- | :-----------------------: | +| list_roles | X | +| add_role | X | +| alter_role | X | +| drop_role | X | +| list_users | X | +| user_info | | +| add_user | X | +| alter_user | X | +| drop_user | X | + +| Clustering | Restricted to Super_Users | +| --------------------- | :-----------------------: | +| cluster_set_routes | X | +| cluster_get_routes | X | +| cluster_delete_routes | X | +| add_node | X | +| update_node | X | +| cluster_status | X | +| remove_node | X | +| configure_cluster | X | + +| Components | Restricted to Super_Users | +| ------------------ | :-----------------------: | +| get_components | X | +| get_component_file | X | +| set_component_file | X | +| drop_component | X | +| add_component | X | +| package_component | X | +| deploy_component | X | + +| Custom Functions | Restricted to Super_Users | +| ------------------------------- | :-----------------------: | +| custom_functions_status | X | +| get_custom_functions | X | +| get_custom_function | X | +| set_custom_function | X | +| drop_custom_function | X | +| add_custom_function_project | X | +| drop_custom_function_project | X | +| package_custom_function_project | X | +| deploy_custom_function_project | X | + +| Registration | Restricted to Super_Users | +| ----------------- | :-----------------------: | +| registration_info | | +| get_fingerprint | X | +| set_license | X | + +| Jobs | Restricted to Super_Users | +| ------------------------- | :-----------------------: | +| get_job | | +| search_jobs_by_start_date | X | + +| Logs | Restricted to Super_Users | +| ------------------------------ | :-----------------------: | +| read_log | X | +| read_transaction_log | X | +| delete_transaction_logs_before | X | +| read_audit_log | X | +| delete_audit_logs_before | X | + +| Utilities | Restricted to Super_Users | +| --------------------- | 
:-----------------------: |
+| delete_records_before | X |
+| export_local | X |
+| export_to_s3 | X |
+| system_information | X |
+| restart | X |
+| restart_service | X |
+| get_configuration | X |
+| configure_cluster | X |
+
+| Token Authentication | Restricted to Super_Users |
+| ---------------------------- | :-----------------------: |
+| create_authentication_tokens | |
+| refresh_operation_token | |

## Error: Must execute as User

@@ -260,4 +265,4 @@ _Keep in mind that non-super\_user roles will also be restricted within the oper

This means that you installed Harper as `<>`. Because Harper stores files natively on the operating system, we only allow the Harper executable to be run by a single user. This prevents permissions issues on files.

-For example if you installed as user\_a, but later wanted to run as user\_b. User\_b may not have access to the hdb files Harper needs. This also keeps Harper more secure as it allows you to lock files down to a specific user and prevents other users from accessing your files.
+For example, if you installed as user_a but later wanted to run as user_b, user_b may not have access to the hdb files Harper needs. This also keeps Harper more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.
diff --git a/docs/developers/sql-guide/README.md b/docs/developers/sql-guide/README.md
index 0a9dcbbf..15fa7b22 100644
--- a/docs/developers/sql-guide/README.md
+++ b/docs/developers/sql-guide/README.md
@@ -68,9 +68,9 @@ DELETE FROM dev.dog

Harper allows developers to join any number of tables and currently supports the following join types:

-* INNER JOIN LEFT
-* INNER JOIN LEFT
-* OUTER JOIN
+- INNER JOIN
+- LEFT INNER JOIN
+- LEFT OUTER JOIN

Here’s a basic example joining two tables from our Get Started example, joining a dogs table with a breeds table:
diff --git a/docs/developers/sql-guide/date-functions.md b/docs/developers/sql-guide/date-functions.md
index 8500f8bb..3c3dce7f 100644
--- a/docs/developers/sql-guide/date-functions.md
+++ b/docs/developers/sql-guide/date-functions.md
@@ -40,6 +40,7 @@ Referencing this variable will evaluate as the current Unix Timestamp in millise

"current_timestamp_result": 1587568845765
}
```
+
### DATE([date_string])

Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
@@ -64,9 +65,8 @@ If a date_string is not provided, the function will return the current UTC date/

Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values: Either string value (key or shorthand) can be passed as the interval argument.
-
| Key | Shorthand |
-|--------------|-----------|
+| ------------ | --------- |
| years | y |
| quarters | Q |
| months | M |
@@ -77,7 +77,6 @@ Adds the defined amount of time to the date provided in UTC and returns the resu

| seconds | s |
| milliseconds | ms |
-

```
"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND
"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return
@@ -99,13 +98,14 @@ AS date_add_result2" returns

Returns the difference between the two date values passed based on the interval as a Number. If an interval is not provided, the function will return the difference value in milliseconds.
Accepted interval values:
-* years
-* months
-* weeks
-* days
-* hours
-* minutes
-* seconds
+
+- years
+- months
+- weeks
+- days
+- hours
+- minutes
+- seconds

```
"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours')
AS date_diff_result" returns
@@ -132,7 +132,7 @@ AS date_format_result" returns

Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted date_sub interval values: either string value (key or shorthand) can be passed as the interval argument.
| Key | Shorthand |
-|--------------|-----------|
+| ------------ | --------- |
| years | y |
| quarters | Q |
| months | M |
@@ -143,7 +143,6 @@ Subtracts the defined amount of time from the date provided in UTC and returns t

| seconds | s |
| milliseconds | ms |
-

```
"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns
{
@@ -155,15 +154,15 @@ Subtracts the defined amount of time from the date provided in UTC and returns t

Extracts and returns the date_part requested as a String value. Accepted date_part values below show value returned for date = “2020-03-26T15:13:02.041+0000”

-| date_part | Example return value* |
-|--------------|------------------------|
-| year | “2020” |
-| month | “3” |
-| day | “26” |
- | hour | “15” |
-| minute | “13” |
-| second | “2” |
-| millisecond | “41” |
+| date_part | Example return value\* |
+| ----------- | ---------------------- |
+| year | “2020” |
+| month | “3” |
+| day | “26” |
+| hour | “15” |
+| minute | “13” |
+| second | “2” |
+| millisecond | “41” |

```
"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns
@@ -184,6 +183,7 @@ Returns the current Unix Timestamp in milliseconds.
```

### GET_SERVER_TIME()
+
Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.

```
@@ -194,6 +194,7 @@ Returns the current date/time value based on the server’s timezone in `YYYY-MM
```

### OFFSET_UTC(date, offset)
+
Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours.

```
@@ -211,6 +212,7 @@ Returns the UTC date time value with the offset provided included in the return
```

### NOW()
+
Returns the current Unix Timestamp in milliseconds.

```
@@ -219,4 +221,3 @@ Returns the current Unix Timestamp in milliseconds.

"now_result": 1587568845765
}
```
-
diff --git a/docs/developers/sql-guide/features-matrix.md b/docs/developers/sql-guide/features-matrix.md
index 12bc3510..853a18cd 100644
--- a/docs/developers/sql-guide/features-matrix.md
+++ b/docs/developers/sql-guide/features-matrix.md
@@ -8,77 +8,77 @@ Harper encourages developers to utilize other querying tools over SQL for perfor

Harper provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need.
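+
+For orientation, SQL statements are submitted through the operations API's `sql` operation. A minimal sketch, reusing the `callHarperDB` helper from the Basic Authentication example (the connection values and the `dev.dog` table come from that example and the Get Started docs; treat them as placeholders):
+
+```javascript
+// Assumes callHarperDB(call_object, operation, callback) as defined in the
+// Basic Authentication example; all connection values here are illustrative.
+const connection = {
+	endpoint_url: 'localhost',
+	endpoint_port: 9925, // assumes the default operations API port
+	username: 'HDB_ADMIN',
+	password: 'password',
+};
+
+callHarperDB(connection, { operation: 'sql', sql: 'SELECT * FROM dev.dog' }, (err, result) => {
+	if (err) return console.error(err);
+	console.log(result); // matching rows returned as JSON
+});
+```
+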
-| INSERT | | -| ---------------------------------- | - | -| Values - multiple values supported | ✔ | -| Sub-SELECT | ✗ | +| INSERT | | +| ---------------------------------- | --- | +| Values - multiple values supported | ✔ | +| Sub-SELECT | ✗ | -| UPDATE | | -| ---------------- | - | -| SET | ✔ | -| Sub-SELECT | ✗ | -| Conditions | ✔ | -| Date Functions\* | ✔ | -| Math Functions | ✔ | +| UPDATE | | +| ---------------- | --- | +| SET | ✔ | +| Sub-SELECT | ✗ | +| Conditions | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | -| DELETE | | -| ---------- | - | -| FROM | ✔ | -| Sub-SELECT | ✗ | -| Conditions | ✔ | +| DELETE | | +| ---------- | --- | +| FROM | ✔ | +| Sub-SELECT | ✗ | +| Conditions | ✔ | -| SELECT | | -| -------------------- | - | -| Column SELECT | ✔ | -| Aliases | ✔ | -| Aggregator Functions | ✔ | -| Date Functions\* | ✔ | -| Math Functions | ✔ | -| Constant Values | ✔ | -| Distinct | ✔ | -| Sub-SELECT | ✗ | +| SELECT | | +| -------------------- | --- | +| Column SELECT | ✔ | +| Aliases | ✔ | +| Aggregator Functions | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | +| Constant Values | ✔ | +| Distinct | ✔ | +| Sub-SELECT | ✗ | -| FROM | | -| ---------------- | - | -| Multi-table JOIN | ✔ | -| INNER JOIN | ✔ | -| LEFT OUTER JOIN | ✔ | -| LEFT INNER JOIN | ✔ | -| RIGHT OUTER JOIN | ✔ | -| RIGHT INNER JOIN | ✔ | -| FULL JOIN | ✔ | -| UNION | ✗ | -| Sub-SELECT | ✗ | -| TOP | ✔ | +| FROM | | +| ---------------- | --- | +| Multi-table JOIN | ✔ | +| INNER JOIN | ✔ | +| LEFT OUTER JOIN | ✔ | +| LEFT INNER JOIN | ✔ | +| RIGHT OUTER JOIN | ✔ | +| RIGHT INNER JOIN | ✔ | +| FULL JOIN | ✔ | +| UNION | ✗ | +| Sub-SELECT | ✗ | +| TOP | ✔ | -| WHERE | | -| -------------------------- | - | -| Multi-Conditions | ✔ | -| Wildcards | ✔ | -| IN | ✔ | -| LIKE | ✔ | -| Bit-wise Operators AND, OR | ✔ | -| Bit-wise Operators NOT | ✔ | -| NULL | ✔ | -| BETWEEN | ✔ | -| EXISTS,ANY,ALL | ✔ | -| Compare columns | ✔ | -| Compare constants | ✔ | -| Date Functions\* | ✔ | -| Math Functions | ✔ | -| Sub-SELECT | ✗ | +| WHERE | | +| -------------------------- | --- | +| Multi-Conditions | ✔ | +| Wildcards | ✔ | +| IN | ✔ | +| LIKE | ✔ | +| Bit-wise Operators AND, OR | ✔ | +| Bit-wise Operators NOT | ✔ | +| NULL | ✔ | +| BETWEEN | ✔ | +| EXISTS,ANY,ALL | ✔ | +| Compare columns | ✔ | +| Compare constants | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | +| Sub-SELECT | ✗ | -| GROUP BY | | -| --------------------- | - | -| Multi-Column GROUP BY | ✔ | +| GROUP BY | | +| --------------------- | --- | +| Multi-Column GROUP BY | ✔ | -| HAVING | | -| ----------------------------- | - | -| Aggregate function conditions | ✔ | +| HAVING | | +| ----------------------------- | --- | +| Aggregate function conditions | ✔ | -| ORDER BY | | -| --------------------- | - | -| Multi-Column ORDER BY | ✔ | -| Aliases | ✔ | -| Date Functions\* | ✔ | -| Math Functions | ✔ | +| ORDER BY | | +| --------------------- | --- | +| Multi-Column ORDER BY | ✔ | +| Aliases | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | diff --git a/docs/developers/sql-guide/functions.md b/docs/developers/sql-guide/functions.md index 8123e063..e0a3ee2e 100644 --- a/docs/developers/sql-guide/functions.md +++ b/docs/developers/sql-guide/functions.md @@ -7,147 +7,149 @@ Harper encourages developers to utilize other querying tools over SQL for perfor This SQL keywords reference contains the SQL functions available in Harper. 
## Functions
+
### Aggregate

-| Keyword | Syntax | Description |
-|-----------------|---------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
-| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
-| COUNT | SELECT COUNT(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the number records that match the given criteria. Nulls are not counted. |
-| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string with concatenated values that are comma separated and that are non-null from a group. Will return null when there are non-null values. |
-| MAX | SELECT MAX(_column_name_) FROM _database.table_ WHERE _condition_ | Returns largest value in a specified column. |
-| MIN | SELECT MIN(_column_name_) FROM _database.table_ WHERE _condition_ | Returns smallest value in a specified column. |
-| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
-| ARRAY* | ARRAY(_expression_) | Returns a list of data as a field. |
-| DISTINCT_ARRAY* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |
+| Keyword | Syntax | Description |
+| ---------------- | -------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
+| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
+| COUNT | SELECT COUNT(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. |
+| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string with concatenated, comma-separated, non-null values from a group. Returns null when there are no non-null values. |
+| MAX | SELECT MAX(_column_name_) FROM _database.table_ WHERE _condition_ | Returns largest value in a specified column. |
+| MIN | SELECT MIN(_column_name_) FROM _database.table_ WHERE _condition_ | Returns smallest value in a specified column. |
+| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
+| ARRAY\* | ARRAY(_expression_) | Returns a list of data as a field. |
+| DISTINCT_ARRAY\* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |

-*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).
+\*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).

### Conversion

-| Keyword | Syntax | Description |
-|---------|--------------------------------------------------|------------------------------------------------------------------------|
-| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
-| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |
-
+| Keyword | Syntax | Description |
+| ------- | ------------------------------------------------ | ----------------------------------------------------------------------- |
+| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
+| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |

### Date & Time

-| Keyword | Syntax | Description |
-|-------------------|-----------------------------------------|-------------|
-| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
-| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” string format. |
-| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, go here. |
-| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ string format. If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above. For more information, go here. |
-| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values: Either string value (key or shorthand) can be passed as the interval argument. For more information, go here. |
-| DATE_DIFF | DATEDIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed based on the interval as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, go here. |
-| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, go here. |
-| DATE_SUB | DATE_SUB(_date, format_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted date_sub interval values- Either string value (key or shorthand) can be passed as the interval argument. For more information, go here. |
-| DAY | DAY(_date_) | Return the day of the month for the given date. |
-| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given(“YYYY-MM-DD”).NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
-| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. Accepted date_part values below show value returned for date = “2020-03-26T15:13:02.041+000” For more information, go here. |
-| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
-| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
-| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. |
-| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
-| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. |
-| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. |
-| MONTH | MONTH(_date_) | Returns month part for a specified date in range of 1 to 12. |
-| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. |
-| YEAR | YEAR(_date_) | Returns the year part for a specified date. |
+| Keyword | Syntax | Description |
+| ----------------- | ----------------------------------------- | ----------- |
+| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
+| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” string format. |
+| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, go here. |
+| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ string format. If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above. For more information, go here. |
+| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values: either string value (key or shorthand) can be passed as the interval argument. For more information, go here. |
+| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed based on the interval as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, go here. |
+| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, go here. |
+| DATE_SUB | DATE_SUB(_date, format_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted date_sub interval values: either string value (key or shorthand) can be passed as the interval argument. For more information, go here. |
+| DAY | DAY(_date_) | Return the day of the month for the given date. |
+| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
+| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. Accepted date_part values below show value returned for date = “2020-03-26T15:13:02.041+0000”. For more information, go here. |
+| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
+| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
+| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. |
+| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
+| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. |
+| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. |
+| MONTH | MONTH(_date_) | Returns month part for a specified date in range of 1 to 12. |
+| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. |
+| YEAR | YEAR(_date_) | Returns the year part for a specified date. |

### Logical

-| Keyword | Syntax | Description |
-|---------|--------------------------------------------------|--------------------------------------------------------------------------------------------|
-| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
-| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
-| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
-| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2, if not equal, returns expression_1. |
+| Keyword | Syntax | Description |
+| ------- | ------------------------------------------------ | -------------------------------------------------------------------------------------------- |
+| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
+| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2, if not equal, returns expression_1. |

### Mathematical

-| Keyword | Syntax | Description |
-|---------|---------------------------------|-------------------------------------------------------------------------------------------------------|
-| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
-| CEIL | CEIL(_number_) | Returns integer ceiling, the smallest integer value that is bigger than or equal to a given number. |
-| EXP | EXP(_number_) | Returns e to the power of a specified number. |
-| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
-| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. |
-| ROUND | ROUND(_number,decimal_places_) | Rounds a given number to a specified number of decimal places. |
-| SQRT | SQRT(_expression_) | Returns the square root of an expression. |
-
+| Keyword | Syntax | Description |
+| ------- | ------------------------------- | ------------------------------------------------------------------------------------------------------ |
+| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
+| CEIL | CEIL(_number_) | Returns integer ceiling, the smallest integer value that is bigger than or equal to a given number. |
+| EXP | EXP(_number_) | Returns e to the power of a specified number. |
+| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
+| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. |
+| ROUND | ROUND(_number,decimal_places_) | Rounds a given number to a specified number of decimal places. |
+| SQRT | SQRT(_expression_) | Returns the square root of an expression. |

### String

-| Keyword | Syntax | Description |
-|-------------|---------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
-| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
-| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
-| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
-| LEN | LEN(_string_) | Returns the length of a string. |
-| LOWER | LOWER(_string_) | Converts a string to lower-case. |
-| REGEXP | SELECT _column_name_ FROM _database.table_ WHERE _column_name_ REGEXP _pattern_ | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
-| REGEXP_LIKE | SELECT _column_name_ FROM _database.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
-| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within new_string, with string. |
-| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified amount of characters from a string. |
-| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
-| UPPER | UPPER(_string_) | Converts a string to upper-case. |
+| Keyword | Syntax | Description |
+| ----------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
+| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
+| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
+| LEN | LEN(_string_) | Returns the length of a string. |
+| LOWER | LOWER(_string_) | Converts a string to lower-case. |
+| REGEXP | SELECT _column_name_ FROM _database.table_ WHERE _column_name_ REGEXP _pattern_ | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REGEXP_LIKE | SELECT _column_name_ FROM _database.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string with new_string. |
+| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified amount of characters from a string. |
+| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
+| UPPER | UPPER(_string_) | Converts a string to upper-case. |

## Operators
+
### Logical Operators

-| Keyword | Syntax | Description |
-|----------|--------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------|
-| BETWEEN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | (inclusive) Returns values(numbers, text, or dates) within a given range. |
-| IN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
-| LIKE | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_n_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |
+| Keyword | Syntax | Description |
+| ------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------- |
+| BETWEEN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | (inclusive) Returns values(numbers, text, or dates) within a given range. |
+| IN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
+| LIKE | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_n_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |

## Queries
+
### General

-| Keyword | Syntax | Description |
-|-----------|-----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
-| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _database.table_ | Returns only unique values, eliminating duplicate records. |
-| FROM | FROM _database.table_ | Used to list the database(s), table(s), and any joins required for a SQL statement. |
-| GROUP BY | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
-| HAVING | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
-| SELECT | SELECT _column_name(s)_ FROM _database.table_ | Selects data from table. |
-| WHERE | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ | Extracts records based on a defined condition. |
+| Keyword | Syntax | Description |
+| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------- |
+| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _database.table_ | Returns only unique values, eliminating duplicate records. |
+| FROM | FROM _database.table_ | Used to list the database(s), table(s), and any joins required for a SQL statement. |
+| GROUP BY | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
+| HAVING | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
+| SELECT | SELECT _column_name(s)_ FROM _database.table_ | Selects data from table. |
+| WHERE | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ | Extracts records based on a defined condition. |

### Joins

-| Keyword | Syntax | Description |
-|---------------------|----------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| CROSS JOIN | SELECT _column_name(s)_ FROM _database.table_1_ CROSS JOIN _database.table_2_ | Returns a paired combination of each row from _table_1_ with row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
-| FULL OUTER | SELECT _column_name(s)_ FROM _database.table_1_ FULL OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
-| [INNER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ INNER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Return only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
-| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ LEFT OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Return all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
-| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ RIGHT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Return all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |
+| Keyword | Syntax | Description |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| CROSS JOIN | SELECT _column_name(s)_ FROM _database.table_1_ CROSS JOIN _database.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
+| FULL OUTER | SELECT _column_name(s)_ FROM _database.table_1_ FULL OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
+| [INNER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ INNER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Return only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
+| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ LEFT OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Return all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
+| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ RIGHT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Return all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |

### Predicates

-| Keyword | Syntax | Description |
-|--------------|------------------------------------------------------------------------------|----------------------------|
-| IS NOT NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
-| IS NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NULL | Tests for null values. |
+| Keyword | Syntax | Description |
+| ----------- | ------------------------------------------------------------------------------ | -------------------------- |
+| IS NOT NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
+| IS NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NULL | Tests for null values. |

### Statements

-| Keyword | Syntax | Description |
-|---------|---------------------------------------------------------------------------------------------|-------------------------------------|
+| Keyword | Syntax | Description |
+| ------- | ----------------------------------------------------------------------------------------------- | ----------------------------------- |
| DELETE | DELETE FROM _database.table_ WHERE condition | Deletes existing data from a table. |
| INSERT | INSERT INTO _database.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. |
| UPDATE | UPDATE _database.table_ SET _column_1 = value_1, column_2 = value_2, ....,_ WHERE _condition_ | Alters existing records in a table. |
diff --git a/docs/developers/sql-guide/json-search.md b/docs/developers/sql-guide/json-search.md
index cc68b8ec..f6cb15a3 100644
--- a/docs/developers/sql-guide/json-search.md
+++ b/docs/developers/sql-guide/json-search.md
@@ -4,11 +4,11 @@ Harper encourages developers to utilize other querying tools over SQL for perfor

# SQL JSON Search

-Harper automatically indexes all top level attributes in a row / object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, Harper offers a special SQL function called SEARCH\_JSON. The SEARCH\_JSON function works in SELECT & WHERE clauses allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) into our SQL engine.
In order to make searching and/or transforming these JSON documents easy, Harper offers a special SQL function called SEARCH_JSON. The SEARCH_JSON function works in SELECT & WHERE clauses allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) into our SQL engine. ## Syntax -SEARCH\_JSON(_expression, attribute_) +SEARCH_JSON(_expression, attribute_) Executes the supplied string _expression_ against data of the defined top level _attribute_ for each row. The expression both filters and defines output from the JSON document. @@ -20,14 +20,14 @@ Here are two records in the database: ```json [ - { - "id": 1, - "name": ["Harper", "Penny"] - }, - { - "id": 2, - "name": ["Penny"] - } + { + "id": 1, + "name": ["Harper", "Penny"] + }, + { + "id": 2, + "name": ["Penny"] + } ] ``` @@ -60,37 +60,37 @@ A sample of this data from the movie The Avengers looks like ```json [ - { - "cast_id": 46, - "character": "Tony Stark / Iron Man", - "credit_id": "52fe4495c3a368484e02b251", - "gender": "male", - "id": 3223, - "name": "Robert Downey Jr.", - "order": 0 - }, - { - "cast_id": 2, - "character": "Steve Rogers / Captain America", - "credit_id": "52fe4495c3a368484e02b19b", - "gender": "male", - "id": 16828, - "name": "Chris Evans", - "order": 1 - }, - { - "cast_id": 307, - "character": "Bruce Banner / The Hulk", - "credit_id": "5e85e8083344c60015411cfa", - "gender": "male", - "id": 103, - "name": "Mark Ruffalo", - "order": 2 - } + { + "cast_id": 46, + "character": "Tony Stark / Iron Man", + "credit_id": "52fe4495c3a368484e02b251", + "gender": "male", + "id": 3223, + "name": "Robert Downey Jr.", + "order": 0 + }, + { + "cast_id": 2, + "character": "Steve Rogers / Captain America", + "credit_id": "52fe4495c3a368484e02b19b", + "gender": "male", + "id": 16828, + "name": "Chris Evans", + "order": 1 + }, + { + "cast_id": 307, + "character": "Bruce Banner / The Hulk", + "credit_id": "5e85e8083344c60015411cfa", + "gender": "male", + "id": 103, + "name": "Mark Ruffalo", + "order": 2 + } ] ``` -Let’s break down the SEARCH\_JSON function call in the SELECT: +Let’s break down the SEARCH_JSON function call in the SELECT: ``` SEARCH_JSON( @@ -113,7 +113,7 @@ SEARCH_JSON( ) ``` -The first argument passed to SEARCH\_JSON is the expression to execute against the second argument which is the cast attribute on the credits table. This expression will execute for every row. Looking into the expression it starts with “$\[…]” this tells the expression to iterate all elements of the cast array. +The first argument passed to SEARCH_JSON is the expression to execute against the second argument which is the cast attribute on the credits table. This expression will execute for every row. Looking into the expression it starts with “$\[…]” this tells the expression to iterate all elements of the cast array. 
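You can try an expression like this on its own before composing the full query. The following is a minimal sketch that runs a standalone SEARCH_JSON call through the operations API; the port, the credentials, and the `dev.credits` naming are assumptions for illustration, not part of the example data:

```javascript
// Hypothetical sketch: run a SEARCH_JSON expression via the operations API.
// Assumes a local Harper instance on port 9925 with placeholder credentials;
// `dev.credits` mirrors the credits table described above.
const response = await fetch('http://localhost:9925', {
	method: 'POST',
	headers: {
		'Content-Type': 'application/json',
		Authorization: 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64'),
	},
	body: JSON.stringify({
		operation: 'sql',
		// $[order < 3].name iterates the cast array, keeps entries whose billing
		// order is below 3, and returns just their names. The cast attribute is
		// wrapped in backticks because CAST is a SQL reserved word.
		sql: "SELECT id, SEARCH_JSON('$[order < 3].name', `cast`) AS top_billed FROM dev.credits",
	}),
});
console.log(await response.json());
```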
Then the expression tells the function to only return entries where the name attribute matches any of the actors defined in the array:

@@ -127,22 +127,22 @@ So far, we’ve iterated the array and filtered out rows, but we also want the r

```json
[
-    {
-        "actor": "Robert Downey Jr.",
-        "character": "Tony Stark / Iron Man"
-    },
-    {
-        "actor": "Chris Evans",
-        "character": "Steve Rogers / Captain America"
-    },
-    {
-        "actor": "Mark Ruffalo",
-        "character": "Bruce Banner / The Hulk"
-    }
+	{
+		"actor": "Robert Downey Jr.",
+		"character": "Tony Stark / Iron Man"
+	},
+	{
+		"actor": "Chris Evans",
+		"character": "Steve Rogers / Captain America"
+	},
+	{
+		"actor": "Mark Ruffalo",
+		"character": "Bruce Banner / The Hulk"
+	}
]
```

-Just having the SEARCH\_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors, in order to filter out the movies we do not want we also use SEARCH\_JSON in the WHERE clause.
+Just having the SEARCH_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors. In order to filter out the movies we do not want, we also use SEARCH_JSON in the WHERE clause.

This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT:

@@ -168,6 +168,6 @@ SEARCH_JSON(

As seen above, we execute the same name filter against the cast array; the primary difference is that we wrap the filtered results in $count(…). This returns a count of the results, which we then use against our SQL comparator of >= 2.

-To see further SEARCH\_JSON examples in action view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples.md).
+To see further SEARCH_JSON examples in action, view our Postman Collection, which provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples.md).

To learn more about how to build expressions, check out the JSONata documentation: [http://docs.jsonata.org/overview](http://docs.jsonata.org/overview)

diff --git a/docs/developers/sql-guide/reserved-word.md b/docs/developers/sql-guide/reserved-word.md
index 10fef2d5..d285dc3e 100644
--- a/docs/developers/sql-guide/reserved-word.md
+++ b/docs/developers/sql-guide/reserved-word.md
@@ -4,7 +4,7 @@ Harper encourages developers to utilize other querying tools over SQL for perfor

# Harper SQL Reserved Words

-This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a database, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable.
+This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a database, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable.
For Example, for a table called `ASSERT` in the `data` database, a SQL select on that table would look like: @@ -20,184 +20,184 @@ SELECT * from data.[ASSERT] ### RESERVED WORD LIST -* ABSOLUTE -* ACTION -* ADD -* AGGR -* ALL -* ALTER -* AND -* ANTI -* ANY -* APPLY -* ARRAY -* AS -* ASSERT -* ASC -* ATTACH -* AUTOINCREMENT -* AUTO_INCREMENT -* AVG -* BEGIN -* BETWEEN -* BREAK -* BY -* CALL -* CASE -* CAST -* CHECK -* CLASS -* CLOSE -* COLLATE -* COLUMN -* COLUMNS -* COMMIT -* CONSTRAINT -* CONTENT -* CONTINUE -* CONVERT -* CORRESPONDING -* COUNT -* CREATE -* CROSS -* CUBE -* CURRENT_TIMESTAMP -* CURSOR -* DATABASE -* DECLARE -* DEFAULT -* DELETE -* DELETED -* DESC -* DETACH -* DISTINCT -* DOUBLEPRECISION -* DROP -* ECHO -* EDGE -* END -* ENUM -* ELSE -* EXCEPT -* EXISTS -* EXPLAIN -* FALSE -* FETCH -* FIRST -* FOREIGN -* FROM -* GO -* GRAPH -* GROUP -* GROUPING -* HAVING -* HDB_HASH -* HELP -* IF -* IDENTITY -* IS -* IN -* INDEX -* INNER -* INSERT -* INSERTED -* INTERSECT -* INTO -* JOIN -* KEY -* LAST -* LET -* LEFT -* LIKE -* LIMIT -* LOOP -* MATCHED -* MATRIX -* MAX -* MERGE -* MIN -* MINUS -* MODIFY -* NATURAL -* NEXT -* NEW -* NOCASE -* NO -* NOT -* NULL -* OFF -* ON -* ONLY -* OFFSET -* OPEN -* OPTION -* OR -* ORDER -* OUTER -* OVER -* PATH -* PARTITION -* PERCENT -* PLAN -* PRIMARY -* PRINT -* PRIOR -* QUERY -* READ -* RECORDSET -* REDUCE -* REFERENCES -* RELATIVE -* REPLACE -* REMOVE -* RENAME -* REQUIRE -* RESTORE -* RETURN -* RETURNS -* RIGHT -* ROLLBACK -* ROLLUP -* ROW -* SCHEMA -* SCHEMAS -* SEARCH -* SELECT -* SEMI -* SET -* SETS -* SHOW -* SOME -* SOURCE -* STRATEGY -* STORE -* SYSTEM -* SUM -* TABLE -* TABLES -* TARGET -* TEMP -* TEMPORARY -* TEXTSTRING -* THEN -* TIMEOUT -* TO -* TOP -* TRAN -* TRANSACTION -* TRIGGER -* TRUE -* TRUNCATE -* UNION -* UNIQUE -* UPDATE -* USE -* USING -* VALUE -* VERTEX -* VIEW -* WHEN -* WHERE -* WHILE -* WITH -* WORK +- ABSOLUTE +- ACTION +- ADD +- AGGR +- ALL +- ALTER +- AND +- ANTI +- ANY +- APPLY +- ARRAY +- AS +- ASSERT +- ASC +- ATTACH +- AUTOINCREMENT +- AUTO_INCREMENT +- AVG +- BEGIN +- BETWEEN +- BREAK +- BY +- CALL +- CASE +- CAST +- CHECK +- CLASS +- CLOSE +- COLLATE +- COLUMN +- COLUMNS +- COMMIT +- CONSTRAINT +- CONTENT +- CONTINUE +- CONVERT +- CORRESPONDING +- COUNT +- CREATE +- CROSS +- CUBE +- CURRENT_TIMESTAMP +- CURSOR +- DATABASE +- DECLARE +- DEFAULT +- DELETE +- DELETED +- DESC +- DETACH +- DISTINCT +- DOUBLEPRECISION +- DROP +- ECHO +- EDGE +- END +- ENUM +- ELSE +- EXCEPT +- EXISTS +- EXPLAIN +- FALSE +- FETCH +- FIRST +- FOREIGN +- FROM +- GO +- GRAPH +- GROUP +- GROUPING +- HAVING +- HDB_HASH +- HELP +- IF +- IDENTITY +- IS +- IN +- INDEX +- INNER +- INSERT +- INSERTED +- INTERSECT +- INTO +- JOIN +- KEY +- LAST +- LET +- LEFT +- LIKE +- LIMIT +- LOOP +- MATCHED +- MATRIX +- MAX +- MERGE +- MIN +- MINUS +- MODIFY +- NATURAL +- NEXT +- NEW +- NOCASE +- NO +- NOT +- NULL +- OFF +- ON +- ONLY +- OFFSET +- OPEN +- OPTION +- OR +- ORDER +- OUTER +- OVER +- PATH +- PARTITION +- PERCENT +- PLAN +- PRIMARY +- PRINT +- PRIOR +- QUERY +- READ +- RECORDSET +- REDUCE +- REFERENCES +- RELATIVE +- REPLACE +- REMOVE +- RENAME +- REQUIRE +- RESTORE +- RETURN +- RETURNS +- RIGHT +- ROLLBACK +- ROLLUP +- ROW +- SCHEMA +- SCHEMAS +- SEARCH +- SELECT +- SEMI +- SET +- SETS +- SHOW +- SOME +- SOURCE +- STRATEGY +- STORE +- SYSTEM +- SUM +- TABLE +- TABLES +- TARGET +- TEMP +- TEMPORARY +- TEXTSTRING +- THEN +- TIMEOUT +- TO +- TOP +- TRAN +- TRANSACTION +- TRIGGER +- TRUE +- TRUNCATE +- UNION +- UNIQUE +- UPDATE +- USE +- USING +- VALUE 
+- VERTEX +- VIEW +- WHEN +- WHERE +- WHILE +- WITH +- WORK diff --git a/docs/developers/sql-guide/sql-geospatial-functions.md b/docs/developers/sql-guide/sql-geospatial-functions.md index ad9ddf8e..d45ef30a 100644 --- a/docs/developers/sql-guide/sql-geospatial-functions.md +++ b/docs/developers/sql-guide/sql-geospatial-functions.md @@ -6,16 +6,11 @@ Harper encourages developers to utilize other querying tools over SQL for perfor Harper geospatial features require data to be stored in a single column using the [GeoJSON standard](http://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements. - - If you are new to GeoJSON you should check out the full specification here: http://geojson.org/. There are a few important things to point out before getting started. - - -1) All GeoJSON coordinates are stored in `[longitude, latitude]` format. -2) Coordinates or GeoJSON geometries must be passed as string when written directly in a SQL statement. -3) Note if you are using Postman for you testing. Due to limitations in the Postman client, you will need to escape quotes in your strings and your SQL will need to be passed on a single line. - +1. All GeoJSON coordinates are stored in `[longitude, latitude]` format. +2. Coordinates or GeoJSON geometries must be passed as string when written directly in a SQL statement. +3. Note if you are using Postman for you testing. Due to limitations in the Postman client, you will need to escape quotes in your strings and your SQL will need to be passed on a single line. In the examples contained in the left-hand navigation, database and table names may change, but all GeoJSON data will be stored in a column named geo_data. @@ -24,14 +19,17 @@ In the examples contained in the left-hand navigation, database and table names The geoArea() function returns the area of one or more features in square meters. ### Syntax + geoArea(_geoJSON_) ### Parameters + | Parameter | Description | -|-----------|---------------------------------| +| --------- | ------------------------------- | | geoJSON | Required. One or more features. | #### Example 1 + Calculate the area, in square meters, of a manually passed GeoJSON polygon. ``` @@ -50,6 +48,7 @@ SELECT geoArea('{ ``` #### Example 2 + Find all records that have an area less than 1 square mile (or 2589988 square meters). ``` @@ -58,18 +57,22 @@ WHERE geoArea(geo_data) < 2589988 ``` # geoLength + Takes a GeoJSON and measures its length in the specified units (default is kilometers). ## Syntax + geoLength(_geoJSON_[_, units_]) ## Parameters -| Parameter | Description | -|------------|-----------------------------------------------------------------------------------------------------------------------| -| geoJSON | Required. GeoJSON to measure. | -| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | + +| Parameter | Description | +| --------- | --------------------------------------------------------------------------------------------------------------------- | +| geoJSON | Required. GeoJSON to measure. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | ### Example 1 + Calculate the length, in kilometers, of a manually passed GeoJSON linestring. 
``` @@ -87,6 +90,7 @@ SELECT geoLength('{ ``` ### Example 2 + Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of lengths smallest to largest. ``` @@ -95,19 +99,24 @@ FROM dev.locations WHERE geoLength(geo_data, 'miles') < 5 ORDER BY length ASC ``` + # geoDifference + Returns a new polygon with the difference of the second polygon clipped from the first polygon. ## Syntax + geoDifference(_polygon1, polygon2_) ## Parameters -| Parameter | Description | -|------------|----------------------------------------------------------------------------| -| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. | -| polygon2 | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1. | + +| Parameter | Description | +| --------- | -------------------------------------------------------------------------- | +| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. | +| polygon2 | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1. | ### Example + Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_). ``` @@ -149,19 +158,23 @@ SELECT geoDifference('{ ``` # geoDistance + Calculates the distance between two points in units (default is kilometers). ## Syntax + geoDistance(_point1, point2_[_, units_]) ## Parameters -| Parameter | Description | -|------------|-----------------------------------------------------------------------------------------------------------------------| -| point1 | Required. GeoJSON Point specifying the origin. | -| point2 | Required. GeoJSON Point specifying the destination. | -| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | + +| Parameter | Description | +| --------- | --------------------------------------------------------------------------------------------------------------------- | +| point1 | Required. GeoJSON Point specifying the origin. | +| point2 | Required. GeoJSON Point specifying the destination. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | ### Example 1 + Calculate the distance, in miles, between Harper’s headquarters and the Washington Monument. ``` @@ -169,6 +182,7 @@ SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles') ``` ### Example 2 + Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in an ascending order. ``` @@ -179,20 +193,24 @@ ORDER BY distance ASC ``` # geoNear + Determines if point1 and point2 are within a specified distance from each other, default units are kilometers. Returns a Boolean. ## Syntax + geoNear(_point1, point2, distance_[_, units_]) ## Parameters -| Parameter | Description | -|------------|-----------------------------------------------------------------------------------------------------------------------| -| point1 | Required. GeoJSON Point specifying the origin. | -| point2 | Required. GeoJSON Point specifying the destination. | -| distance | Required. The maximum distance in units as an integer or decimal. | -| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. 
| + +| Parameter | Description | +| --------- | --------------------------------------------------------------------------------------------------------------------- | +| point1 | Required. GeoJSON Point specifying the origin. | +| point2 | Required. GeoJSON Point specifying the destination. | +| distance | Required. The maximum distance in units as an integer or decimal. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | ### Example 1 + Return all locations within 50 miles of a given point. ``` @@ -202,6 +220,7 @@ WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles') ``` ### Example 2 + Return all locations within 2 degrees of the earth of a given point. (Each degree lat/long is about 69 miles [111 kilometers]). Return all data and the distance in miles, sorted by ascending distance. ``` @@ -212,18 +231,22 @@ ORDER BY distance ASC ``` # geoContains + Determines if geo2 is completely contained by geo1. Returns a Boolean. ## Syntax + geoContains(_geo1, geo2_) ## Parameters -| Parameter | Description | -|------------|-----------------------------------------------------------------------------------| -| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. | -| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. | + +| Parameter | Description | +| --------- | --------------------------------------------------------------------------------- | +| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. | +| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. | ### Example 1 + Return all locations within the state of Colorado (passed as a GeoJSON string). ``` @@ -248,6 +271,7 @@ WHERE geoContains('{ ``` ### Example 2 + Return all locations which contain Harper Headquarters. ``` @@ -274,18 +298,22 @@ WHERE geoContains(geo_data, '{ ``` # geoEqual + Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean. ## Syntax + geoEqual(_geo1_, _geo2_) ## Parameters -| Parameter | Description | -|------------|----------------------------------------| -| geo1 | Required. GeoJSON geometry or feature. | -| geo2 | Required. GeoJSON geometry or feature. | + +| Parameter | Description | +| --------- | -------------------------------------- | +| geo1 | Required. GeoJSON geometry or feature. | +| geo2 | Required. GeoJSON geometry or feature. | ### Example + Find Harper Headquarters within all locations within the database. ``` @@ -312,18 +340,22 @@ WHERE geoEqual(geo_data, '{ ``` # geoCrosses + Determines if the geometries cross over each other. Returns boolean. ## Syntax + geoCrosses(_geo1, geo2_) ## Parameters -| Parameter | Description | -|------------|-----------------------------------------| -| geo1 | Required. GeoJSON geometry or feature. | -| geo2 | Required. GeoJSON geometry or feature. | + +| Parameter | Description | +| --------- | -------------------------------------- | +| geo1 | Required. GeoJSON geometry or feature. | +| geo2 | Required. GeoJSON geometry or feature. | ### Example + Find all locations that cross over a highway. ``` @@ -357,16 +389,19 @@ WHERE geoCrosses( Converts a series of coordinates into a GeoJSON of the specified type. 
## Syntax + geoConvert(_coordinates, geo_type_[, _properties_]) ## Parameters -| Parameter | Description | -|--------------|------------------------------------------------------------------------------------------------------------------------------------| -| coordinates | Required. One or more coordinates | -| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’ | -| properties | Optional. Escaped JSON array with properties to be added to the GeoJSON output. | + +| Parameter | Description | +| ----------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| coordinates | Required. One or more coordinates | +| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’ | +| properties | Optional. Escaped JSON array with properties to be added to the GeoJSON output. | ### Example + Convert a given coordinate into a GeoJSON point with specified properties. ``` diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md index 829e55ca..4273fdec 100644 --- a/docs/getting-started/README.md +++ b/docs/getting-started/README.md @@ -7,6 +7,7 @@ Follow the steps in this documentation to discover how Harper can simplify your For more advanced concepts in Harper, see our [blog](https://www.harpersystems.dev/blog). ## Harper Basics + @@ -38,4 +39,4 @@ For more advanced concepts in Harper, see our [blog](https://www.harpersystems.d -
harper-concepts
\ No newline at end of file + diff --git a/docs/getting-started/first-harper-app.md b/docs/getting-started/first-harper-app.md index 7b5f4827..c7859eef 100644 --- a/docs/getting-started/first-harper-app.md +++ b/docs/getting-started/first-harper-app.md @@ -1,7 +1,9 @@ # Create Your First Application + Now that you've set up Harper, let's build a simple API. Harper lets you build powerful APIs with minimal effort. In just a few minutes, you'll have a functional REST API with automatic validation, indexing, and querying—all without writing a single line of code. ## Setup Your Project + Start by cloning the Harper application template: ```bash @@ -10,6 +12,7 @@ cd my-app ``` ## Creating our first Table + The core of a Harper application is the database, so let's create a database table. A quick and expressive way to define a table is through a [GraphQL Schema](https://graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we will need to add a `type` of `@table` named `Dog` (and you can remove the example table in the template): @@ -35,10 +38,13 @@ Now we tell Harper to run this as an application: ```bash harperdb dev . # tell Harper cli to run current directory as an application in dev mode ``` + Harper will now create the `Dog` table and its `id` attribute we just defined. Not only is this an easy way to create a table, but this schema is included in our application, which will ensure that this table exists wherever we deploy this application (to any Harper instance). ## Adding Attributes to our Table + Next, let's expand our `Dog` table by adding additional typed attributes for dog `name`, `breed` and `age`. + ```graphql type Dog @table { id: ID @primaryKey @@ -65,6 +71,7 @@ type Dog @table @sealed { ``` ## Adding an Endpoint + Now that we have a running application with a database (with data if you imported any data), let's make this data accessible from a RESTful URL by adding an endpoint. To do this, we simply add the `@export` directive to our `Dog` table: ```graphql @@ -94,6 +101,7 @@ Content-Type: application/json With this a record will be created and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/` to view that record. Alternately, the curl command curl `http://localhost:9926/Dog/` will achieve the same thing. ## Authenticating Endpoints + Now that you've created your first API endpoints, it's important to ensure they're protected. Without authentication, anyone could potentially access, misuse, or overload your APIs, whether by accident or malicious intent. Authentication verifies who is making the request and enables you to control access based on identity, roles, or permissions. It’s a foundational step in building secure, reliable applications. Endpoints created with Harper automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../developers/security/) for more information on different levels of access. @@ -101,6 +109,7 @@ Endpoints created with Harper automatically support `Basic`, `Cookie`, and `JWT` By default, Harper also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make it simple to interact for local development. 
If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../deployments/configuration.md#authentication). ### Content Negotiation + These endpoints support various content types, including `JSON`, `CBOR`, `MessagePack` and `CSV`. Simply include an `Accept` header in your requests with the preferred content type. We recommend `CBOR` as a compact, efficient encoding with rich data types, but `JSON` is familiar and great for web application development, and `CSV` can be useful for exporting data to spreadsheets or other processing. Harper works with other important standard HTTP headers as well, and these endpoints are even capable of caching interaction: @@ -119,12 +128,12 @@ In order to maintain reasonable query speed on a database as it grows in size, i ```graphql type Dog @table { - id: ID @primaryKey - name: String @indexed - breed: String @indexed - owner: String - age: Int - tricks: [String] + id: ID @primaryKey + name: String @indexed + breed: String @indexed + owner: String + age: Int + tricks: [String] } ``` @@ -154,4 +163,5 @@ Congratulations, you now have created a secure database application backend with > Additionally, you may now use GraphQL (over HTTP) to create queries. See the documentation for that new feature [here](../technical-details/reference/graphql.md). ## Key Takeaway -Harper's schema-driven approach means you can build production-ready APIs in minutes, not hours. Start with pure schema definitions to get 90% of your functionality, then add custom code only where needed. This gives you the best of both worlds: rapid development with the flexibility to customize when required. \ No newline at end of file + +Harper's schema-driven approach means you can build production-ready APIs in minutes, not hours. Start with pure schema definitions to get 90% of your functionality, then add custom code only where needed. This gives you the best of both worlds: rapid development with the flexibility to customize when required. diff --git a/docs/getting-started/harper-concepts.md b/docs/getting-started/harper-concepts.md index 8537224e..208950c7 100644 --- a/docs/getting-started/harper-concepts.md +++ b/docs/getting-started/harper-concepts.md @@ -3,20 +3,24 @@ As you begin your journey with Harper, there are a few concepts and definitions that you should understand. ## Components + Harper components are a core Harper concept defined as flexible JavaScript based extensions of the highly extensible core Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](../technical-details/reference/globals.md) (such as Resource, databases, and tables). A key aspect to components are their extensibility; components can be built on other components. For example, a [Harper Application](../developers/applications/README.md) is a component that uses many other components. The [application template](https://github.com/HarperDB/application-template) demonstrates many of Harper's built-in components such as [rest](../technical-details/reference/components/built-in-extensions.md#rest) (for automatic REST endpoint generation), [graphqlSchema](../technical-details/reference/components/built-in-extensions.md#graphqlschema) (for table schema definitions), and many more. ## Applications + Applications are a subset of components that cannot be used directly and must depend on other extensions. 
Examples include defining schemas (using the [graphqlSchema](../technical-details/reference/components/built-in-extensions.md#graphqlschema) built-in extension), defining custom resources (using the [jsResource](../technical-details/reference/components/built-in-extensions.md#jsresource) built-in extension), hosting static files (using the [static](../technical-details/reference/components/built-in-extensions.md#static) built-in extension), enabling REST querying of resources (using the [rest](../technical-details/reference/components/built-in-extensions.md#rest) built-in extension), and running [Next.js](https://github.com/HarperDB/nextjs), [Astro](https://github.com/HarperDB/astro), or [Apollo](https://github.com/HarperDB/apollo) applications through their respective extensions.

## Resources
+
Resources in Harper encompass databases, tables, and schemas that store and structure data within the system. The concept is central to Harper's data management capabilities, with custom resources being enabled by the built-in jsResource extension. Resources represent the data layer of the Harper ecosystem and provide the foundation for data operations across applications built with the platform.

## Server
-Harper is a multi-protocol server, handling incoming requests from clients and serving data from the data model. Harper supports multiple server protocols, with components for serving REST/HTTP (including Server-Sent Events), MQTT, WebSockets, and the Operations API (and custom server components can be added). Harper uses separate layers for the data model and the servers. The data model, which is defined with resources, can be exported and be used as the source for any of the servers. A single table or other resource can then be accessed and modified through REST, MQTT, SSE, or any other server protocol, for a powerful integrated model with multiple forms of access.
+
+Harper is a multi-protocol server, handling incoming requests from clients and serving data from the data model. Harper supports multiple server protocols, with components for serving REST/HTTP (including Server-Sent Events), MQTT, WebSockets, and the Operations API (and custom server components can be added). Harper uses separate layers for the data model and the servers. The data model, which is defined with resources, can be exported and used as the source for any of the servers. A single table or other resource can then be accessed and modified through REST, MQTT, SSE, or any other server protocol, for a powerful integrated model with multiple forms of access.

Networking in Harper handles different communication protocols including HTTP, WebSocket, and MQTT, as well as event-driven systems. These networking capabilities enable Harper applications to communicate with other services, receive requests, send responses, and participate in real-time data exchange. The networking layer is fundamental to Harper's functionality as a versatile application platform.

-__
-As you go through Harper, you will pick up more knowledge of other advanced areas along the way, but with these concepts, you're now ready to create your first application. \ No newline at end of file
+As you go through Harper, you will pick up more knowledge of other advanced areas along the way, but with these concepts, you're now ready to create your first application.
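To make the layering concrete, here is a minimal sketch of a resource definition; the `Dog` table and the `TrackedDog` name are assumptions for illustration, and any table defined in your schema works the same way. Because the data model is its own layer, exporting a single class makes the same data available to the REST, MQTT, and WebSocket servers at once:

```javascript
// resources.js — a hypothetical sketch; assumes a Dog table is defined in the
// application's schema. `tables` is one of Harper's global APIs.
const { Dog } = tables;

export class TrackedDog extends Dog {
	// get() runs whether the record is fetched over REST, read through an MQTT
	// or SSE subscription, or queried by another server component, because every
	// protocol uses this same resource as its data source.
	get(query) {
		return super.get(query);
	}
}
```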
diff --git a/docs/getting-started/install-harper.md b/docs/getting-started/install-harper.md index d92762cb..43f4e6cc 100644 --- a/docs/getting-started/install-harper.md +++ b/docs/getting-started/install-harper.md @@ -3,6 +3,7 @@ There are three ways to install a Harper instance: using a package manager like npm, deploying it as a Docker container, and offline installation. Below is a step-by-step tutorial for each method. ## Installing via NPM + Before you begin, ensure you have [Node.js](https://nodejs.org/) LTS version or newer. Node.js comes with npm, which will be used to install Harper. Open your terminal or command prompt and install Harper globally by executing the command below. Installing globally allows the `harperdb` command to be accessible from anywhere on your machine, making it easier to manage multiple projects. @@ -22,6 +23,7 @@ This launches Harper as a standalone, where you can define your schemas, endpoin At this point, your local Harper instance is up and running, giving you the ability to develop and test your database applications using your favorite local development tools, including debuggers and version control systems. ## Installing via Docker + Using Docker to run Harper is an efficient way to manage a containerized instance that encapsulates all of Harper’s functionality. First, ensure that Docker is installed and running on your system. If it isn’t, download it from the [official Docker website](https://docs.docker.com/engine/install/) and complete the installation process. Next, open your terminal and pull the latest Harper image by running the following command: @@ -39,7 +41,9 @@ docker run -d -p 9925:9925 harperdb/harperdb In this command, the `-d` flag runs the container in detached mode, allowing it to operate in the background, and the `-p 9925:9925` flag maps port 9925 on your local machine to port 9925 within the container, which is Harper’s default port. This port mapping lets you interact with the Harper instance directly from your local environment. ### How to Use this Image + [Harper configuration settings⁠](https://harperdb.io/docs/reference/configuration-file/) can be passed as Docker run environment variables. If no environment variables are provided, Harper will operate with default configuration settings, such as: + - ROOTPATH=/home/harperdb/hdb - OPERATIONSAPI_NETWORK_PORT=9925 - HDB_ADMIN_USERNAME=HDB_ADMIN @@ -55,6 +59,7 @@ Test your Harper instance is up and running by querying `curl http://localhost:9 {% endhint %} ### Example Deployments + To run a Harper container in the background with persistent storage and exposed ports, you can use a command like this: ```bash @@ -101,6 +106,7 @@ docker run --rm harperdb/harperdb /bin/bash -c "harperdb version" This command runs the container momentarily to print the version information, then removes the container automatically when finished. ### Logs and Troubleshooting + To verify that the container is running properly, you can check your running containers with: ```bash @@ -116,6 +122,7 @@ docker logs Once verified, you can access your Harper instance by opening your web browser and navigating to http://localhost:9925 (or the appropriate port based on your configuration). ### Raw binary installation + There's a different way to install Harper. You can choose your version and download the npm package and install it directly (you’ll still need Node.js and NPM). Click [this link](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) to download and install the package. 
Once you’ve downloaded the .tgz file, run the following command from the directory where you’ve placed it: ```bash diff --git a/docs/getting-started/what-is-harper.md b/docs/getting-started/what-is-harper.md index 9777484b..01fd4931 100644 --- a/docs/getting-started/what-is-harper.md +++ b/docs/getting-started/what-is-harper.md @@ -52,4 +52,4 @@ For use cases like real-time sports updates, flight tracking, and zero-day softw Capturing, storing, and processing real-time data streams from client and IoT systems typically requires a stack of technology. Harper’s selective data replication and self-healing connections make for an ideal multi-tier system where edge and cloud systems both run Harper, making everything more performant. -[We’re happy](https://www.harpersystems.dev/contact) to walk you through how to do this. \ No newline at end of file +[We’re happy](https://www.harpersystems.dev/contact) to walk you through how to do this. diff --git a/docs/index.md b/docs/index.md index 30fdf86b..17d4e100 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1 +1 @@ - \ No newline at end of file + diff --git a/docs/technical-details/reference/README.md b/docs/technical-details/reference/README.md index 1b5c2065..360fb23f 100644 --- a/docs/technical-details/reference/README.md +++ b/docs/technical-details/reference/README.md @@ -2,24 +2,24 @@ This section contains technical details and reference materials for Harper. -* [Analytics](technical-details/reference/analytics.md) -* [Architecture](technical-details/reference/architecture.md) -* [Blob](technical-details/reference/blob.md) -* [Content Types](technical-details/reference/content-types.md) -* [Components](technical-details/reference/components/README.md) - * [Applications](technical-details/reference/components/applications.md) - * [Built-In Extensions](technical-details/reference/components/built-in-extensions.md) - * [Configuration](technical-details/reference/components/configuration.md) - * [Extensions](technical-details/reference/components/extensions.md) - * [(Experimental) Plugins](technical-details/reference/components/plugins.md) -* [Data Types](technical-details/reference/data-types.md) -* [Dynamic Schema](technical-details/reference/dynamic-schema.md) -* [Globals](technical-details/reference/globals.md) -* [GraphQL](technical-details/reference/graphql.md) -* [Headers](technical-details/reference/headers.md) -* [Limits](technical-details/reference/limits.md) -* [Resources](technical-details/reference/resources/README.md) - * [Migration](technical-details/reference/resources/migration.md) - * [Instance Binding](technical-details/reference/resources/instance-binding.md) -* [Storage Algorithm](technical-details/reference/storage-algorithm.md) -* [Transactions](technical-details/reference/transactions.md) +- [Analytics](technical-details/reference/analytics.md) +- [Architecture](technical-details/reference/architecture.md) +- [Blob](technical-details/reference/blob.md) +- [Content Types](technical-details/reference/content-types.md) +- [Components](technical-details/reference/components/README.md) + - [Applications](technical-details/reference/components/applications.md) + - [Built-In Extensions](technical-details/reference/components/built-in-extensions.md) + - [Configuration](technical-details/reference/components/configuration.md) + - [Extensions](technical-details/reference/components/extensions.md) + - [(Experimental) Plugins](technical-details/reference/components/plugins.md) +- [Data 
Types](technical-details/reference/data-types.md)
+- [Dynamic Schema](technical-details/reference/dynamic-schema.md)
+- [Globals](technical-details/reference/globals.md)
+- [GraphQL](technical-details/reference/graphql.md)
+- [Headers](technical-details/reference/headers.md)
+- [Limits](technical-details/reference/limits.md)
+- [Resources](technical-details/reference/resources/README.md)
+  - [Migration](technical-details/reference/resources/migration.md)
+  - [Instance Binding](technical-details/reference/resources/instance-binding.md)
+- [Storage Algorithm](technical-details/reference/storage-algorithm.md)
+- [Transactions](technical-details/reference/transactions.md)
diff --git a/docs/technical-details/reference/analytics.md b/docs/technical-details/reference/analytics.md
index 07ab82c2..e63bb0fd 100644
--- a/docs/technical-details/reference/analytics.md
+++ b/docs/technical-details/reference/analytics.md
@@ -4,7 +4,7 @@ Harper provides extensive telemetry and analytics data to help monitor the statu

Harper collects statistics for all operations, URL endpoints, and messaging topics, aggregating information by thread, operation, resource, and methods, in real-time. These statistics are logged in the `hdb_raw_analytics` and `hdb_analytics` tables in the `system` database.

-There are two "levels" of analytics in the Harper analytics table: the first is the immediate level of raw direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search\_by\_conditions operation (this will search for 10 seconds worth of analytics) on the `hdb_raw_analytics` table:
+There are two "levels" of analytics in the Harper analytics table: the first is the immediate level of raw direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search_by_conditions operation (this will search for 10 seconds' worth of analytics) on the `hdb_raw_analytics` table:

```
POST http://localhost:9925
@@ -100,14 +100,15 @@ And a summary record looks like:

The following are general resource usage statistics that are tracked:

-* memory - This includes RSS, heap, buffer and external data usage.
-* utilization - How much of the time the worker was processing requests.
-* mqtt-connections - The number of MQTT connections.
+- memory - This includes RSS, heap, buffer and external data usage.
+- utilization - How much of the time the worker was processing requests.
+- mqtt-connections - The number of MQTT connections.

The following types of information are tracked for each HTTP request:

-* success - How many requests returned a successful response (20x response code). TTFB - Time to first byte in the response to the client.
+- success - How many requests returned a successful response (20x response code).
+- TTFB - Time to first byte in the response to the client.
+- transfer - Time to finish the transfer of the data to the client. +- bytes-sent - How many bytes of data were sent to the client. Requests are categorized by operation name, for the operations API, by the resource (name) with the REST API, and by command for the MQTT interface. diff --git a/docs/technical-details/reference/architecture.md b/docs/technical-details/reference/architecture.md index 2a0a4ed4..d82c4b50 100644 --- a/docs/technical-details/reference/architecture.md +++ b/docs/technical-details/reference/architecture.md @@ -34,5 +34,5 @@ Harper's architecture consists of resources, which includes tables and user defi │ │ │ │ │ └────────────────┘ │ │ │ - └────────────────────────────────────────┘ + └────────────────────────────────────────┘ ``` diff --git a/docs/technical-details/reference/blob.md b/docs/technical-details/reference/blob.md index ecf46f3e..d99b666c 100644 --- a/docs/technical-details/reference/blob.md +++ b/docs/technical-details/reference/blob.md @@ -1,6 +1,7 @@ # Blob Blobs are binary large objects that can be used to store any type of unstructured/binary data and is designed for large content. Blobs support streaming and feature better performance for content larger than about 20KB. Blobs are built off the native JavaScript `Blob` type, and HarperDB extends the native `Blob` type for integrated storage with the database. To use blobs, you would generally want to declare a field as a `Blob` type in your schema: + ```graphql type MyTable { id: Any! @primaryKey @@ -14,6 +15,7 @@ You can then create a blob which writes the binary data to disk, and can then be let blob = await createBlob(largeBuffer); await MyTable.put({ id: 'my-record', data: blob }); ``` + The `data` attribute in this example is a blob reference, and can be used like any other attribute in the record, but it is stored separately, and the data must be accessed asynchronously. You can retrieve the blob data with the standard `Blob` methods: ```javascript @@ -33,6 +35,7 @@ export class MyEndpoint extends MyTable { } } ``` + One of the important characteristics of blobs is they natively support asynchronous streaming of data. This is important for both creation and retrieval of large data. When we create a blob with `createBlob`, the returned blob will create the storage entry, but the data will be streamed to storage. This means that you can create a blob from a buffer or from a stream. You can also create a record that references a blob before the blob is fully written to storage. For example, you can create a blob from a stream: ```javascript @@ -44,6 +47,7 @@ let record = await MyTable.get('my-record'); // we now have a record that gives us access to the blob. We can asynchronously access the blob's data or stream the data, and it will be available as blob the stream is written to the blob. let stream = record.data.stream(); ``` + This can be powerful functionality for large media content, where content can be streamed into storage as it streamed out in real-time to users as it is received. Alternately, we can also wait for the blob to be fully written to storage before creating a record that references the blob: @@ -55,9 +59,10 @@ await blob.save(MyTable); await MyTable.put({ id: 'my-record', data: blob }); ``` -Note that this means that blobs are _not_ atomic or [ACID](https://en.wikipedia.org/wiki/ACID) compliant; streaming functionality achieves the opposite behavior of ACID/atomic writes that would prevent access to data as it is being written. 
+Note that this means that blobs are _not_ atomic or [ACID](https://en.wikipedia.org/wiki/ACID) compliant; streaming functionality achieves the opposite behavior of ACID/atomic writes that would prevent access to data as it is being written. ### Error Handling + Because blobs can be streamed and referenced prior to their completion, there is a chance that an error or interruption could occur while streaming data to the blob (after the record is committed). We can create an error handler for the blob to handle the case of an interrupted blob: ```javascript @@ -80,6 +85,7 @@ export class MyEndpoint extends MyTable { ### Blob `size` Blobs that are created from streams may not have the standard `size` property available, because the size may not be known while data is being streamed. Consequently, the `size` property may be undefined until the size is determined. You can listen for the `size` event to be notified when the size is available: + ```javascript let record = await MyTable.get('my-record'); let blob = record.data; @@ -93,4 +99,4 @@ if (blob.size === undefined) { ``` -See the [configuration](../../deployments/configuration.md) documentation for more information on configuring where blob are stored. +See the [configuration](../../deployments/configuration.md) documentation for more information on configuring where blob are stored. diff --git a/docs/technical-details/reference/components/README.md b/docs/technical-details/reference/components/README.md index 702c35a8..47f0e87d 100644 --- a/docs/technical-details/reference/components/README.md +++ b/docs/technical-details/reference/components/README.md @@ -32,4 +32,4 @@ This technical reference section has detailed information on various component s - [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs) - [`@harperdb/apollo`](https://github.com/HarperDB/apollo) -- [`@harperdb/astro`](https://github.com/HarperDB/astro) \ No newline at end of file +- [`@harperdb/astro`](https://github.com/HarperDB/astro) diff --git a/docs/technical-details/reference/components/applications.md b/docs/technical-details/reference/components/applications.md index 588a4aaf..707ebad7 100644 --- a/docs/technical-details/reference/components/applications.md +++ b/docs/technical-details/reference/components/applications.md @@ -27,23 +27,23 @@ Stop execution for either of these processes by sending a SIGINT (generally CTRL Alternatively, to mimic interfacing with a hosted Harper instance, use operation commands instead. 1. Start up Harper with `harperdb` -2. _Deploy_ the application to the local instance by executing: - - ```sh - harperdb deploy \ - project= \ - package= \ - restart=true - ``` - - * Make sure to omit the `target` option so that it _deploys_ to the Harper instance running locally - * The `package=` option creates a symlink to the application simplifying restarts - * By default, the `deploy` operation command will _deploy_ the current directory by packaging it up and streaming the bytes. By specifying `package`, it skips this and references the file path directly - * The `restart=true` option automatically restarts Harper threads after the application is deployed - * If set to `'rolling'`, a rolling restart will be triggered after the application is deployed +2. 
_Deploy_ the application to the local instance by executing: + + ```sh + harperdb deploy \ + project= \ + package= \ + restart=true + ``` + - Make sure to omit the `target` option so that it _deploys_ to the Harper instance running locally + - The `package=` option creates a symlink to the application simplifying restarts + - By default, the `deploy` operation command will _deploy_ the current directory by packaging it up and streaming the bytes. By specifying `package`, it skips this and references the file path directly + - The `restart=true` option automatically restarts Harper threads after the application is deployed + - If set to `'rolling'`, a rolling restart will be triggered after the application is deployed + 3. In another terminal, use the `harperdb restart` command to restart the instance's threads at any time - * With `package=`, the application source is symlinked so changes will automatically be picked up between restarts - * If `package` was omitted, run the `deploy` command again with any new changes + - With `package=`, the application source is symlinked so changes will automatically be picked up between restarts + - If `package` was omitted, run the `deploy` command again with any new changes 4. To remove the application use `harperdb drop_component project=` Similar to the previous section, if the main thread needs to be restarted, start and stop the Harper instance manually (with the application deployed). Upon Harper startup, the application will automatically be loaded and executed across all threads. @@ -103,11 +103,11 @@ A local application can be deployed to a remote instance by **omitting** the `pa Furthermore, the `package` field can be set to any valid [npm dependency value](https://docs.npmjs.com/cli/v11/configuring-npm/package-json#dependencies). -* For applications deployed to npm, specify the package name: `package="@harperdb/status-check"` -* For applications on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check` -* Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-applications.git"` - * Reference the [SSH Key](../operations-api/components.md#add-ssh-key) operations for more information on managing SSH keys on a remote instance -* Even tarball URLs are supported: `package="https://example.com/application.tar.gz"` +- For applications deployed to npm, specify the package name: `package="@harperdb/status-check"` +- For applications on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check` +- Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-applications.git"` + - Reference the [SSH Key](../operations-api/components.md#add-ssh-key) operations for more information on managing SSH keys on a remote instance +- Even tarball URLs are supported: `package="https://example.com/application.tar.gz"` > When using git tags, we highly recommend that you use the semver directive to ensure consistent and reliable installation by npm. In addition to tags, you can also reference branches or commit numbers. @@ -133,7 +133,7 @@ The configuration is very similar to that of `config.yaml`. 
Entries are comprise ```yaml status-check: - package: "@harperdb/status-check" + package: '@harperdb/status-check' ``` The key difference between this and a component's `config.yaml` is that the name does **not** need to be associated with a `package.json` dependency. When Harper starts up, it transforms these configurations into a `package.json` file, and then executes a form of `npm install`. Thus, the `package: ` can be any valid dependency syntax such as npm packages, GitHub repos, tarballs, and local directories are all supported. @@ -142,7 +142,7 @@ Given a root config like: ```yaml myGithubComponent: - package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub + package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub myNPMComponent: package: harperdb # install from npm myTarBall: @@ -157,13 +157,13 @@ Harper will generate a `package.json` like: ```json { - "dependencies": { - "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0", - "myNPMComponent": "npm:harperdb", - "myTarBall": "file:/Users/harper/cool-component.tar", - "myLocal": "file:/Users/harper/local", - "myWebsite": "https://harperdb-component" - } + "dependencies": { + "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0", + "myNPMComponent": "npm:harperdb", + "myTarBall": "file:/Users/harper/cool-component.tar", + "myLocal": "file:/Users/harper/local", + "myWebsite": "https://harperdb-component" + } } ``` diff --git a/docs/technical-details/reference/components/built-in-extensions.md b/docs/technical-details/reference/components/built-in-extensions.md index 22d8ca14..07489600 100644 --- a/docs/technical-details/reference/components/built-in-extensions.md +++ b/docs/technical-details/reference/components/built-in-extensions.md @@ -181,4 +181,4 @@ component/ ``` -The HTML files will be available at `localhost/static/index.html` and `localhost/static/blog.html` respectively. \ No newline at end of file +The HTML files will be available at `localhost/static/index.html` and `localhost/static/blog.html` respectively. diff --git a/docs/technical-details/reference/components/extensions.md b/docs/technical-details/reference/components/extensions.md index 644c2e03..c1c85d05 100644 --- a/docs/technical-details/reference/components/extensions.md +++ b/docs/technical-details/reference/components/extensions.md @@ -28,14 +28,14 @@ Any [Resource Extension](#resource-extension) can be configured with the `files` > Harper relies on the [fast-glob](https://github.com/mrmlnc/fast-glob) library for glob pattern matching. -- **files** - `string | string[] | Object` - *required* - A [glob pattern](https://github.com/mrmlnc/fast-glob?tab=readme-ov-file#pattern-syntax) string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the extension. If specified as an object, the `source` property is required. By default, Harper **matches files and directories**; this is configurable using the `only` option. - - **source** - `string | string[]` - *required* - The glob pattern string or array of strings. - - **only** - `'all' | 'files' | 'directories'` - *optional* - The glob pattern will match only the specified entry type. Defaults to `'all'`. - - **ignore** - `string[]` - *optional* - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`. -- **urlPath** - `string` - *optional* - A base URL path to prepend to the resolved `files` entries. 
+- **files** - `string | string[] | Object` - _required_ - A [glob pattern](https://github.com/mrmlnc/fast-glob?tab=readme-ov-file#pattern-syntax) string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the extension. If specified as an object, the `source` property is required. By default, Harper **matches files and directories**; this is configurable using the `only` option. + - **source** - `string | string[]` - _required_ - The glob pattern string or array of strings. + - **only** - `'all' | 'files' | 'directories'` - _optional_ - The glob pattern will match only the specified entry type. Defaults to `'all'`. + - **ignore** - `string[]` - _optional_ - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`. +- **urlPath** - `string` - _optional_ - A base URL path to prepend to the resolved `files` entries. - If the value starts with `./`, such as `'./static/'`, the component name will be included in the base url path - If the value is `.`, then the component name will be the base url path - - Note: `..` is an invalid pattern and will result in an error + - Note: `..` is an invalid pattern and will result in an error - Otherwise, the value here will be the base URL path. Leading and trailing `/` characters will be handled automatically (`/static/`, `/static`, and `static/` are all equivalent to `static`) For example, to configure the [static](./built-in-extensions.md#static) component to serve all HTML files from the `web` source directory on the `static` URL endpoint: @@ -60,6 +60,7 @@ graphqlSchema: The `files` option also supports a more complex options object. These additional fields enable finer control of the glob pattern matching. For example, to match files within `web`, and omit any within the `web/images` directory, the configuration could be: + ```yaml static: files: @@ -89,28 +90,29 @@ export function setupDirectory() {} function handleDirectory() {} function setupFile() {} -module.exports = { handleDirectory, setupFile } +module.exports = { handleDirectory, setupFile }; ``` When returned by a [Protocol Extension](#protocol-extension), these methods should be defined on the object instead: ```js export function start() { - return { - handleFile () {} - } + return { + handleFile() {}, + }; } ``` #### `handleFile(contents, urlPath, absolutePath, resources): void | Promise` + #### `setupFile(contents, urlPath, absolutePath, resources): void | Promise` These methods are for processing individual files. They can be async. > Remember! -> +> > `setupFile()` is executed **once** on the **main thread** during the main start sequence. -> +> > `handleFile()` is executed on **worker threads** and is executed again during restarts. Parameters: @@ -124,6 +126,7 @@ Parameters: Returns: `void | Promise` #### `handleDirectory(urlPath, absolutePath, resources): boolean | void | Promise` + #### `setupDirectory(urlPath, absolutePath, resources): boolean | void | Promise` These methods are for processing directories. They can be async. @@ -131,9 +134,9 @@ These methods are for processing directories. They can be async. If the function returns or resolves a truthy value, then the component loading sequence will end and no other entries within the directory will be processed. > Remember! -> +> > `setupDirectory()` is executed **once** on the **main thread** during the main start sequence.
-> +> `handleDirectory()` is executed on **worker threads** and is executed again during restarts. Parameters: @@ -170,6 +173,7 @@ Many protocol extensions will use the `port` and `securePort` options for config A Protocol Extension is made up of two distinct methods, [`start()`](#startoptions-resourceextension--promiseresourceextension) and [`startOnMainThread()`](#startonmainthreadoptions-resourceextension--promiseresourceextension). Similar to a Resource Extension, the `start()` method is executed on _all worker threads_, and _executed again on restarts_. The `startOnMainThread()` method is **only** executed **once** during the initial system start sequence. These methods have an identical `options` object parameter, and can both return a Resource Extension (i.e. an object containing one or more of the methods listed above). #### `start(options): ResourceExtension | Promise` + #### `startOnMainThread(options): ResourceExtension | Promise` Parameters: diff --git a/docs/technical-details/reference/components/plugins.md b/docs/technical-details/reference/components/plugins.md index f57fb42a..bbacd845 100644 --- a/docs/technical-details/reference/components/plugins.md +++ b/docs/technical-details/reference/components/plugins.md @@ -26,49 +26,54 @@ This is a functional example of how the `handleComponent()` method and `scope` a ```js export function handleComponent(scope) { - const staticFiles = new Map(); - - scope.options.on('change', (key, value, config) => { - if (key[0] === 'files' || key[0] === 'urlPath') { - // If the files or urlPath options change, we need to reinitialize the static files map - staticFiles.clear(); - logger.info(`Static files reinitialized due to change in ${key.join('.')}`); - } - }); - - scope.handleEntry((entry) => { - if (entry.entryType === 'directory') { - logger.info(`Cannot serve directories. Update the files option to only match files.`); - return; - } - - switch (entry.eventType) { - case 'add': - case 'change': - // Store / Update the file contents in memory for serving - staticFiles.set(entry.urlPath, entry.contents); - break; - case 'unlink': - // Remove the file from memory when it is deleted - staticFiles.delete(entry.urlPath); - break; - } - }); - - scope.server.http((req, next) => { - if (req.method !== 'GET') return next(req); - - // Attempt to retrieve the requested static file from memory - const staticFile = staticFiles.get(req.pathname); - - return staticFile ? { - statusCode: 200, - body: staticFile, - } : { - statusCode: 404, - body: 'File not found', - } - }, { runFirst: true }); + const staticFiles = new Map(); + + scope.options.on('change', (key, value, config) => { + if (key[0] === 'files' || key[0] === 'urlPath') { + // If the files or urlPath options change, we need to reinitialize the static files map + staticFiles.clear(); + logger.info(`Static files reinitialized due to change in ${key.join('.')}`); + } + }); + + scope.handleEntry((entry) => { + if (entry.entryType === 'directory') { + logger.info(`Cannot serve directories.
Update the files option to only match files.`); + return; + } + + switch (entry.eventType) { + case 'add': + case 'change': + // Store / Update the file contents in memory for serving + staticFiles.set(entry.urlPath, entry.contents); + break; + case 'unlink': + // Remove the file from memory when it is deleted + staticFiles.delete(entry.urlPath); + break; + } + }); + + scope.server.http( + (req, next) => { + if (req.method !== 'GET') return next(req); + + // Attempt to retrieve the requested static file from memory + const staticFile = staticFiles.get(req.pathname); + + return staticFile + ? { + statusCode: 200, + body: staticFile, + } + : { + statusCode: 404, + body: 'File not found', + }; + }, + { runFirst: true } + ); } ``` @@ -112,8 +117,8 @@ Closes all associated entry handlers, the associated `scope.options` instance, e Parameters: -- **files** - [`FilesOptions`](#interface-filesoptions) | [`FileAndURLPathConfig`](#interface-fileandurlpathconfig) | `onEntryEventHandler` - *optional* -- **handler** - `onEntryEventHandler` - *optional* +- **files** - [`FilesOptions`](#interface-filesoptions) | [`FileAndURLPathConfig`](#interface-fileandurlpathconfig) | `onEntryEventHandler` - _optional_ +- **handler** - `onEntryEventHandler` - _optional_ Returns: `EntryHandler` - An instance of the `EntryHandler` class that can be used to handle entries within the scope. @@ -128,21 +133,28 @@ For example, ```js export function handleComponent(scope) { - // Get the default EntryHandler instance - const defaultEntryHandler = scope.handleEntry(); - - // Assign a handler for the 'all' event on the default EntryHandler - scope.handleEntry((entry) => { /* ... */ }); - - // Create a new EntryHandler for the 'src/**/*.js' files option with a custom `'all'` event handler. - const customEntryHandler = scope.handleEntry({ - files: 'src/**/*.js', - }, (entry) => { /* ... */ }); - - // Create another custom EntryHandler for the 'src/**/*.ts' files option, but without a `'all'` event handler. - const anotherCustomEntryHandler = scope.handleEntry({ - files: 'src/**/*.ts', - }); + // Get the default EntryHandler instance + const defaultEntryHandler = scope.handleEntry(); + + // Assign a handler for the 'all' event on the default EntryHandler + scope.handleEntry((entry) => { + /* ... */ + }); + + // Create a new EntryHandler for the 'src/**/*.js' files option with a custom `'all'` event handler. + const customEntryHandler = scope.handleEntry( + { + files: 'src/**/*.js', + }, + (entry) => { + /* ... */ + } + ); + + // Create another custom EntryHandler for the 'src/**/*.ts' files option, but without a `'all'` event handler. + const anotherCustomEntryHandler = scope.handleEntry({ + files: 'src/**/*.ts', + }); } ``` @@ -159,7 +171,7 @@ Then the default `EntryHandler` instances would be created to handle all entries Returns: `void` -Request a Harper restart. This **does not** restart the instance immediately, but rather indicates to the user that a restart is required. This should be called when the plugin cannot handle the entry event and wants to indicate to the user that the Harper instance should be restarted. +Request a Harper restart. This **does not** restart the instance immediately, but rather indicates to the user that a restart is required. This should be called when the plugin cannot handle the entry event and wants to indicate to the user that the Harper instance should be restarted. 
This method is called automatically by the `scope` instance if the user has not defined a `scope.options.on('change')` handler or any event handlers for the default `EntryHandler` instance. @@ -183,13 +195,13 @@ An `OptionsWatcher` instance associated with the component using the plugin. Emi ## Interface: `FilesOptionsObject` -- **source** - `string` | `string[]` - *required* - The glob pattern string or array of strings. -- **ignore** - `string` | `string[]` - *optional* - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`. +- **source** - `string` | `string[]` - _required_ - The glob pattern string or array of strings. +- **ignore** - `string` | `string[]` - _optional_ - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`. ## Interface: `FileAndURLPathConfig` -- **files** - `FilesOptions` - *required* - A glob pattern string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the plugin. -- **urlPath** - `string` - *optional* - A base URL path to prepend to the resolved `files` entries. +- **files** - `FilesOptions` - _required_ - A glob pattern string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the plugin. +- **urlPath** - `string` - _optional_ - A base URL path to prepend to the resolved `files` entries. ## Class: `OptionsWatcher` @@ -218,9 +230,9 @@ For example, if the `files` option for `customPlugin` is changed to `web/**/*.js ```js scope.options.on('change', (key, value, config) => { - key // ['files'] - value // 'web/**/*.js' - config // { files: 'web/**/*.js' } + key; // ['files'] + value; // 'web/**/*.js' + config; // { files: 'web/**/*.js' } }); ``` @@ -251,6 +263,7 @@ Closes the options watcher, removing all listeners and preventing any further ev ### `options.get(key)` Parameters: + - **key** - `string[]` - The key of the option to get, split into parts (e.g. `foo.bar` is represented as `['foo', 'bar']`). Returns: [`ConfigValue`](#interface-configvalue) | `undefined` @@ -295,25 +308,25 @@ An effective pattern for this event is: ```js async function handleComponent(scope) { - scope.handleEntry((entry) => { - switch(entry.eventType) { - case 'add': - // Handle file addition - break; - case 'change': - // Handle file change - break; - case 'unlink': - // Handle file deletion - break; - case 'addDir': - // Handle directory addition - break; - case 'unlinkDir': - // Handle directory deletion - break; - } - }); + scope.handleEntry((entry) => { + switch (entry.eventType) { + case 'add': + // Handle file addition + break; + case 'change': + // Handle file change + break; + case 'unlink': + // Handle file deletion + break; + case 'addDir': + // Handle directory addition + break; + case 'unlinkDir': + // Handle directory deletion + break; + } + }); } ``` @@ -380,6 +393,7 @@ Closes the entry handler, removing all listeners and preventing any further even ### `entryHandler.update(config)` Parameters: + - **config** - [`FilesOption`](#interface-filesoption) | [`FileAndURLPathConfig`](#interface-fileandurlpathconfig) - The configuration object for the entry handler. This method will update an existing entry handler to watch new entries. It will close the underlying watcher and create a new one, but will maintain any existing listeners on the EntryHandler instance itself.
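To make the interplay between `scope.options` and `entryHandler.update()` concrete, here is a minimal sketch (the option key checked and the logging are illustrative, not part of the API) of a plugin that re-points its entry handler when the component's `files` option changes:

```js
export function handleComponent(scope) {
	// Watch entries matched by the component's current `files` option
	const entryHandler = scope.handleEntry((entry) => {
		logger.info(`Entry event '${entry.eventType}' for ${entry.urlPath}`);
	});

	// When the `files` option changes, re-point the existing handler at the new
	// pattern; listeners already attached to the handler are preserved
	scope.options.on('change', (key, value) => {
		if (key[0] === 'files') {
			entryHandler.update({ files: value });
		}
	});
}
```

Because `update()` maintains existing listeners, the same entry event handler continues to fire for entries matched by the new pattern.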
@@ -404,7 +418,7 @@ Extends [`BaseEntry`](#interface-baseentry) - **contents** - `Buffer` - The contents of the file. -A specific extension of the `BaseEntry` interface representing a file entry. We automatically read the contents of the file so the user doesn't have to bother with FS operations. +A specific extension of the `BaseEntry` interface representing a file entry. We automatically read the contents of the file so the user doesn't have to bother with FS operations. There is no `DirectoryEntry` since there is no other important metadata aside from the `BaseEntry` properties. If a user wants the contents of a directory, they should adjust the pattern to resolve files instead. diff --git a/docs/technical-details/reference/content-types.md b/docs/technical-details/reference/content-types.md index 442e12a3..bc6ae084 100644 --- a/docs/technical-details/reference/content-types.md +++ b/docs/technical-details/reference/content-types.md @@ -22,4 +22,4 @@ MessagePack is another efficient binary format like CBOR, with support for all H Comma-separated values is an easy to use and understand format that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available. -In addition, with the REST interface, you can use file-style extensions to indicate an encoding like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest.md) for more information on how to do this. \ No newline at end of file +In addition, with the REST interface, you can use file-style extensions to indicate an encoding like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest.md) for more information on how to do this. diff --git a/docs/technical-details/reference/data-types.md b/docs/technical-details/reference/data-types.md index f329416a..10c2c59d 100644 --- a/docs/technical-details/reference/data-types.md +++ b/docs/technical-details/reference/data-types.md @@ -16,10 +16,10 @@ Strings, or text, are a sequence of any unicode characters and are internally en Numbers can be stored as signed integers with up to 1000 bits of precision (about 300 digits) or as floating point with 64-bit precision, and numbers are automatically stored using the most optimal type. With JSON, numbers are automatically parsed and stored in the most appropriate format. Custom components and applications may use BigInt numbers to store/access integers that are larger than 53-bit.
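As an illustrative sketch (the table and property names are hypothetical), a component could store and read back such a value with a JavaScript `BigInt`:

```javascript
const { MyTable } = tables;

// Store an integer beyond Number.MAX_SAFE_INTEGER using a BigInt literal
await MyTable.put({ id: 'big-example', counter: 12345678901234567890n });

// Read it back; handle the property as a BigInt, not a standard number
const record = await MyTable.get('big-example');
console.log(typeof record.counter); // expected: 'bigint'
```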
The following GraphQL schema type names are supported: -* `Float` - Any number that can be represented with [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision\_floating-point\_format) ("double") -* `Int` - Any integer between from -2147483648 to 2147483647 -* `Long` - Any integer between from -9007199254740992 to 9007199254740992 -* `BigInt` - Any integer (negative or positive) with less than 300 digits +- `Float` - Any number that can be represented with a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) ("double") +- `Int` - Any integer from -2147483648 to 2147483647 +- `Long` - Any integer from -9007199254740992 to 9007199254740992 +- `BigInt` - Any integer (negative or positive) with less than 300 digits Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately. @@ -44,10 +44,12 @@ Dates can be stored as a specific data type. This is not supported in JSON, but Binary data can be stored in property values as well, and two different data types are available: ### Bytes + JSON doesn’t have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in HarperDB. Custom Functions can also store binary data by using NodeJS’s Buffer or Uint8Array instances to hold the binary data. The GraphQL schema type name is `Bytes`. ### Blobs -Binary data can also be stored with [`Blob`s](blob.md), which can scale much better for larger content than `Bytes`, as it is designed to be streamed and does not need to be held entirely in memory. It is recommended that `Blob`s are used for content larger than 20KB. + +Binary data can also be stored with [`Blob`s](blob.md), which can scale much better for larger content than `Bytes`, as it is designed to be streamed and does not need to be held entirely in memory. It is recommended that `Blob`s are used for content larger than 20KB. ## Explicit Map/Set diff --git a/docs/technical-details/reference/dynamic-schema.md b/docs/technical-details/reference/dynamic-schema.md index 084b5476..32a7faf2 100644 --- a/docs/technical-details/reference/dynamic-schema.md +++ b/docs/technical-details/reference/dynamic-schema.md @@ -12,8 +12,8 @@ Harper databases hold a collection of tables together in a single file that are Harper tables group records together with a common data pattern. To create a table, users must provide a table name and a primary key. -* **Table Name**: Used to identify the table. -* **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in Harper operations API. +- **Table Name**: Used to identify the table. +- **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in the Harper operations API. ## Primary Key @@ -27,8 +27,8 @@ With tables that are using dynamic schemas, additional attributes are reflexivel Harper automatically creates two audit attributes used on each record if the table is created without a schema. -* `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format. -* `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+- `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format. +- `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format. ### Dynamic Schema Example @@ -60,7 +60,7 @@ At this point the table does not have structure beyond what we provided, so the **dev.dog** -![](../../../images/reference/dynamic\_schema\_2\_create\_table.png.webp) +![](../../../images/reference/dynamic_schema_2_create_table.png.webp) **Insert Record** @@ -81,7 +81,7 @@ With a single record inserted and new attributes defined, our table now looks li **dev.dog** -![](../../../images/reference/dynamic\_schema\_3\_insert\_record.png.webp) +![](../../../images/reference/dynamic_schema_3_insert_record.png.webp) Indexes have been automatically created for `dog_name` and `owner_name` attributes. @@ -105,7 +105,7 @@ In this case, there is no change to the schema. Our table now looks like this: **dev.dog** -![](../../../images/reference/dynamic\_schema\_4\_insert\_additional\_record.png.webp) +![](../../../images/reference/dynamic_schema_4_insert_additional_record.png.webp) **Update Existing Record** @@ -126,7 +126,7 @@ Now we have a new attribute called `weight_lbs`. Our table now looks like this: **dev.dog** -![](../../../images/reference/dynamic\_schema\_5\_update\_existing\_record.png.webp) +![](../../../images/reference/dynamic_schema_5_update_existing_record.png.webp) **Query Table with SQL** @@ -141,4 +141,4 @@ Now if we query for all records where `weight_lbs` is `null` we expect to get ba This results in the expected two records being returned. -![](../../../images/reference/dynamic\_schema\_6\_query\_table\_with\_sql.png.webp) +![](../../../images/reference/dynamic_schema_6_query_table_with_sql.png.webp) diff --git a/docs/technical-details/reference/globals.md b/docs/technical-details/reference/globals.md index bc3700f0..80fed1c3 100644 --- a/docs/technical-details/reference/globals.md +++ b/docs/technical-details/reference/globals.md @@ -34,7 +34,7 @@ async function getRecord() { } ``` -It is recommended that you [define a database](../../developers/applications/defining-schemas.md) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake\_case for the actual table names, and converted back to CamelCase when added to the `tables` object. +It is recommended that you [define a database](../../developers/applications/defining-schemas.md) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object. ## `databases` @@ -72,13 +72,14 @@ Returns an array of server instances based on the specified `options.port` and ` Example: ```js -server.http((request, next) => { - return request.url === '/graphql' - ? handleGraphQLRequest(request) - : next(request); -}, { - runFirst: true, // run this handler first -}); +server.http( + (request, next) => { + return request.url === '/graphql' ? 
handleGraphQLRequest(request) : next(request); + }, + { + runFirst: true, // run this handler first + } +); ``` #### `RequestListener` @@ -92,16 +93,19 @@ The HTTP request listener to be added to the middleware chain. To continue chain The `Request` and `Response` classes are based on the WHATWG APIs for the [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) classes. Requests and responses are based on these standards-based APIs to facilitate reuse with modern web code. While Node.js' HTTP APIs are powerful low-level APIs, the `Request`/`Response` APIs provide excellent composability characteristics, well suited for layered middleware and for clean mapping to [RESTful method handlers](./resource.md) with promise-based responses, as well as interoperability with other standards-based APIs like [streams](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) used with [`Blob`s](https://developer.mozilla.org/en-US/docs/Web/API/Blob). However, the Harper implementation of these classes is not a direct implementation of the WHATWG APIs, but implements additional/distinct properties for the Harper server environment: #### `Request` + A `Request` object is passed to the direct static REST handlers, and preserved as the context for instance methods, and has the following properties: + - `url` - This is the request target, which is the portion of the URL that was received by the server. If a client sends a request to `http://example.com:8080/path?query=string`, the actual received request is `GET /path?query=string` and the `url` property will be `/path?query=string`. - `method` - This is the HTTP method of the request. This is a string like `GET`, `POST`, `PUT`, `DELETE`, etc. - `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the request. - `pathname` - This is the path portion of the URL, without the query string. For example, if the URL is `/path?query=string`, the `pathname` will be `/path`. - `protocol` - This is the protocol of the request, like `http` or `https`. -- `data` - This is the deserialized body of the request (based on the type of data specified by [`Content-Type`](./content-types.md) header). +- `data` - This is the deserialized body of the request (based on the type of data specified by the [`Content-Type`](./content-types.md) header). - `ip` - This is the remote IP address of the client that made the request (or the remote IP address of the last proxy to connect to Harper). - `host` - This is the host of the request, like `example.com`. - `sendEarlyHints(link: string, headers?: object): void` - This method sends an early hints response to the client, prior to actually returning a response. This is useful for sending a link header to the client to indicate that another resource should be preloaded. The `headers` argument can be used to send additional headers with the early hints response, in addition to the `link`. This is generally most helpful in a cache resolution function, where you can send hints _if_ the data is not in the cache and is resolving from an origin: + ```javascript class Origin { async get(request) { @@ -113,6 +117,7 @@ class Origin { } Cache.sourcedFrom(Origin); ``` + - `login(username, password): Promise` - This method can be called to start an authenticated session. The login will authenticate the user by username and password.
If the authentication was successful, a session will be created and a cookie will be set on the response header that references the session. All subsequent requests from the client that include the cookie will be authenticated as the user that logged in, and the session record will be attached to the request. This method returns a promise that resolves when the login is successful, and rejects if the login is unsuccessful. - `session` - This is the session object that is associated with the current cookie-maintained session. This object is used to store session data for the current session. This is a `Table` record instance, and can be updated by calling `request.session.update({ key: value })` or the session can be retrieved with `request.session.get()`. If the cookie has not been set yet, a cookie will be set the first time a session is updated or a login occurs. - `_nodeRequest` - This is the underlying Node.js [`http.IncomingMessage`](https://nodejs.org/api/http.html#http_class_http_incomingmessage) object. This can be used to access the raw request data, such as the raw headers, raw body, etc. However, this is discouraged and should be used with caution since it will likely break any other server handlers that depend on the layered `Request` call with `Response` return pattern. @@ -121,9 +126,10 @@ Cache.sourcedFrom(Origin); #### `Response` REST methods can directly return data that is serialized and returned to users, or they can return a `Response` object (or a promise to a `Response`), or they can return a `Response`-like object with the following properties (or again, a promise to it): + - `status` - This is the HTTP status code of the response. This is a number like `200`, `404`, `500`, etc. - `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the response. -- `data` - This is the data to be returned of the response. This will be serialized using Harper's [content negotiation](./content-types.md). +- `data` - This is the data to be returned in the response. This will be serialized using Harper's [content negotiation](./content-types.md). - `body` - Alternately (to `data`), the raw body can be returned as a `Buffer`, string, stream (Node.js or [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)), or a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob). #### `HttpOptions` Type: `Object` Properties: -* `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false` -* `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926` -* `securePort` - _optional_ - `number` - Specify which HTTPS server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927` +- `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false` +- `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926` +- `securePort` - _optional_ - `number` - Specify which HTTPS server middleware chain to add the listener to.
Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927` #### `HttpServer` @@ -152,8 +158,8 @@ Node.js socket server connection listener as documented in [`net.createServer`]( #### `SocketOptions` -* `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance. -* `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance. +- `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance. +- `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance. #### `SocketServer` @@ -185,10 +191,10 @@ Type: `(ws: WebSocket, request: Request, chainCompletion: ChainCompletion, next: The WebSocket connection listener. -* The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module. -* The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server. -* The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket. -* The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one such as: `next(ws, request, chainCompletion)` +- The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module. +- The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server. +- The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket. +- The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one such as: `next(ws, request, chainCompletion)` #### `WsOptions` @@ -196,10 +202,10 @@ Type: `Object` Properties: -* `maxPayload` - _optional_ - `number` - Set the max payload size for the WebSocket server. Defaults to 100 MB. -* `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false` -* `port` - _optional_ - `number` - Specify which WebSocket server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926` -* `securePort` - _optional_ - `number` - Specify which WebSocket secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927` +- `maxPayload` - _optional_ - `number` - Set the max payload size for the WebSocket server. 
Defaults to 100 MB. +- `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false` +- `port` - _optional_ - `number` - Specify which WebSocket server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926` +- `securePort` - _optional_ - `number` - Specify which WebSocket secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927` ### `server.upgrade(listener: UpgradeListener, options: UpgradeOptions): void` @@ -240,9 +246,9 @@ Type: `Object` Properties: -* `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false` -* `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926` -* `securePort` - _optional_ - `number` - Specify which HTTP secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927` +- `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false` +- `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926` +- `securePort` - _optional_ - `number` - Specify which HTTP secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927` ### `server.config` @@ -252,23 +258,28 @@ This provides access to the Harper configuration object. This comes from the [ha This records the provided value as a metric into Harper's analytics. Harper efficiently records and tracks these metrics and makes them available through [analytics API](analytics.md). The values are aggregated and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics. For the parameters, make sure you are not grouping on too fine of a level for useful aggregation. The parameters are: -* `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example. -* `metric` - This is the name of the metric. -* `path` - This is an optional path (like a URL path). For a URL like /my-resource/, you would typically include a path of "my-resource", not including the id so you can group by all the requests to "my-resource" instead of individually aggregating by each individual id. -* `method` - Optional method to group by. -* `type` - Optional type to group by. +- `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example. +- `metric` - This is the name of the metric. +- `path` - This is an optional path (like a URL path). For a URL like /my-resource/, you would typically include a path of "my-resource", not including the id so you can group by all the requests to "my-resource" instead of individually aggregating by each individual id. +- `method` - Optional method to group by. +- `type` - Optional type to group by. 
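To tie these parameters together, here is a minimal sketch (the metric name and grouping values are illustrative, and the positional call shape `server.recordAnalytics(value, metric, path, method, type)` is assumed from the parameter list above) that measures request handling time in an HTTP middleware:

```javascript
server.http(async (request, next) => {
	const start = performance.now();
	const response = await next(request);
	// Record the elapsed time, grouping by a coarse path rather than per-id
	server.recordAnalytics(performance.now() - start, 'request-duration', 'my-resource', request.method);
	return response;
});
```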
### `server.getUser(username): Promise` + This returns the user object with permissions/authorization information based on the provided username. This does not verify the password, so it is generally used for looking up users by username. If you want to verify a user by password, use [`server.authenticateUser`](globals.md#serverauthenticateuserusername-password-user). ### `server.authenticateUser(username, password): Promise` + This returns the user object with permissions/authorization information based on the provided username. The password will be verified before returning the user object (if the password is incorrect, an error will be thrown). ### `server.resources: Resources` + This provides access to the map of all registered resources. This is the central registry in Harper for registering any resources to be exported for use by REST, MQTT, or other components. Components that want to register resources should use the `server.resources.set(name, resource)` method to add to this map. Exported resources can be found by passing in a path to `server.resources.getMatch(path)`, which will find any resource that matches the path or beginning of the path. #### `server.resources.set(name, resource, exportTypes?)` + Register a resource with the server. For example: + ``` class NewResource extends Resource { } server.resources.set('NewResource', NewResource); // or limit usage: server.resources.set('NewResource', NewResource, { rest: true, mqtt: false, 'my-protocol': true }); ``` + #### `server.resources.getMatch(path, exportType?)` + Find a resource that matches the path. For example: + ``` server.resources.getMatch('/NewResource/some-id'); // or specify the export/protocol type, to allow it to be limited: @@ -297,10 +311,13 @@ Parameters: Returns a `Promise` with the operation's response as per the [Operations API documentation](https://docs.harperdb.io/docs/developers/operations-api). ### `server.nodes` + Returns an array of node objects registered in the cluster. ### `server.shards` + Returns a map of shard numbers to arrays of their associated nodes. ### `server.hostname` + Returns the hostname of the current node. diff --git a/docs/technical-details/reference/graphql.md b/docs/technical-details/reference/graphql.md index a88ee75d..d5f56efe 100644 --- a/docs/technical-details/reference/graphql.md +++ b/docs/technical-details/reference/graphql.md @@ -11,6 +11,7 @@ This automatically enables a `/graphql` endpoint that can be used for GraphQL qu Queries can either be `GET` or `POST` requests, and both follow essentially the same request format. `GET` requests must use search parameters, and `POST` requests use the request body. For example, to request the GraphQL Query: + ```graphql query GetDogs { Dog { @@ -87,6 +88,7 @@ The Harper GraphQL Querying system takes many liberties from the GraphQL specifi In variable definitions, the querying system will ensure non-null values exist (and error appropriately), but it will not do any type checking of the value itself. For example, the variable `$name: String!` states that `name` should be a non-null, string value. + - If the request does not contain the `name` variable, an error will be returned - If the request provides `null` for the `name` variable, an error will be returned - If the request provides any non-string value for the `name` variable, e.g. `1`, `true`, `{ foo: "bar" }`, the behavior is undefined and an error may or may not be returned.
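For example, a client-side sketch (the host, port, and query are illustrative) that supplies the `$name` variable correctly might look like:

```javascript
const response = await fetch('http://localhost:9926/graphql', {
	method: 'POST',
	headers: { 'Content-Type': 'application/json' },
	body: JSON.stringify({
		query: 'query GetDogsByName($name: String!) { Dog(name: $name) { name breed } }',
		// Omitting `name`, or sending null, results in an error response
		variables: { name: 'Harper' },
	}),
});
const { data } = await response.json();
```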
@@ -96,6 +98,7 @@ For example, the variable `$name: String!` states that `name` should be a non-nu - Fragments will generally extend non-specified types, and the querying system will do no validity checking on them. For example, `fragment Fields on Any { ... }` is just as valid as `fragment Fields on MadeUpTypeName { ... }`. See the Fragments sections for more details. The only notable place the querying system will do some level of type analysis is the transformation of arguments into a query. + - Objects will be transformed into properly nested attributes - Strings and Boolean values are passed through as their AST values - Float and Int values will be parsed using the JavaScript `parseFloat` and `parseInt` methods respectively. @@ -170,6 +173,7 @@ query GetDog($id: ID!) { ``` And as a properly formed request: + ```http POST /graphql/ Content-Type: application/json @@ -184,6 +188,7 @@ Accept: application/graphql-response+json ``` The REST equivalent would be: + ```http GET /Dog/?id==0&select(name,breed,owner{name}) # or @@ -207,6 +212,7 @@ query GetDog { ``` Would be equivalent to + ```http GET /Dog/?owner.name==John&select(name,breed,owner{name}) ``` @@ -216,7 +222,7 @@ And finally, we can put all of these together to create semi-complex, equality b The following query has two variables and will return all dogs who have the specified name as well as the specified owner name. ```graphql -query GetDog($dogName: String!, $ownerName: String! ) { +query GetDog($dogName: String!, $ownerName: String!) { Dog(name: $dogName, owner: { name: $ownerName }) { name breed @@ -241,4 +247,4 @@ query GetDog($dogName: String!, $ownerName: String! ) { ### Directives -> Coming soon! \ No newline at end of file +> Coming soon! diff --git a/docs/technical-details/reference/headers.md b/docs/technical-details/reference/headers.md index f2ad7882..431ff477 100644 --- a/docs/technical-details/reference/headers.md +++ b/docs/technical-details/reference/headers.md @@ -2,7 +2,7 @@ All Harper API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all Harper API responses: -| Key | Example Value | Description | -|-------------------|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------| -| server-timing | db;dur=7.165 | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools. | -| content-type | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header. | +| Key | Example Value | Description | +| ------------- | ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| server-timing | db;dur=7.165 | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools. | +| content-type | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header. 
| diff --git a/docs/technical-details/reference/limits.md b/docs/technical-details/reference/limits.md index f2882462..f34746e2 100644 --- a/docs/technical-details/reference/limits.md +++ b/docs/technical-details/reference/limits.md @@ -29,4 +29,5 @@ Additionally, they cannot contain the first 31 non-printing characters. Spaces a Harper limits the number of total indexed attributes across tables (including the primary key of each table) to 10,000 per database. ## Primary Keys -The maximum length of a primary key is 1978 bytes or 659 characters (whichever is shortest). \ No newline at end of file + +The maximum length of a primary key is 1978 bytes or 659 characters (whichever is shorter). diff --git a/docs/technical-details/reference/resources/README.md b/docs/technical-details/reference/resources/README.md index 7507bf38..7d212ce7 100644 --- a/docs/technical-details/reference/resources/README.md +++ b/docs/technical-details/reference/resources/README.md @@ -8,19 +8,19 @@ Conceptually, a Resource class provides an interface for accessing, querying, mo Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. The general rule for how to interact with resources is: -* If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`. -* If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods. +- If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`. +- If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods. The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to and be used in a similar way to how you would interact with a RESTful API. The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs that were designed with single-server interactions in mind. Semantics that attempt to guarantee no existing record or overwrite-only behavior require locks that don't scale well in a distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed eventually consistent database.
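To make that distinction concrete before the CRUD mapping below, here is a minimal sketch (table and field names are hypothetical) of the two interaction styles:

```javascript
const { Dog } = tables;

// Act upon the table: use the static methods
const dog = await Dog.get(34); // read the record with primary key 34
await Dog.put({ id: 34, name: 'Harper', breed: 'Labrador' }); // full write (insert or update)

// Define custom behavior: extend the table class and override instance methods
export class CustomDog extends tables.Dog {
	async get(target) {
		const record = await super.get(target);
		return { ...record, nameLength: record.name?.length };
	}
}
```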
You can generally think of CRUD operations mapping to REST operations like this: -* Read - `get` -* Create with a known primary key - `put` -* Create with a generated primary key - `post`/`create` -* Update (Full) - `put` -* Update (Partial) - `patch` -* Delete - `delete` +- Read - `get` +- Create with a known primary key - `put` +- Create with a generated primary key - `post`/`create` +- Update (Full) - `put` +- Update (Partial) - `patch` +- Delete - `delete` The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URL, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped to an instance of MyResource, and will call the instance methods like `get(target)`, `put(target, data)`, and `post(target, data)`, where target is based on the `/3492` part of the path. @@ -40,7 +40,7 @@ export class MyExternalData extends Resource { // send the data into the external source } delete(target) { - // delete an entity in the external data source + // delete an entity in the external data source } subscribe(subscription) { // if the external data source is capable of real-time notification of changes, can subscribe @@ -149,7 +149,6 @@ If `get` is called for a single record (for a request like `/Table/some-id`), th This performs a query on this resource or table. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`) it searches through all records in the table. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an `AsyncIterable` of results. The `query` object can be used to specify the desired query. - ### `put(target: RequestTarget | Id, data: object): void|Response` This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(target, data)`) writes the record to the table (updating or inserting depending on if the record previously existed) as part of the current transaction for the resource instance. @@ -163,6 +162,7 @@ This will update the existing record with the provided data's properties, and is ### `update(target: RequestTarget, updates?: object): Updatable` This can be called to get an Updatable class for updating a record. An `Updatable` instance provides direct access to record properties as properties on the `Updatable` instance. The properties can also be modified and any changes are tracked and written to the record when the transaction commits. For example, if we wanted to update the quantity of a product in the Product table, in response to a post, we could write: + ```javascript class ... { post(target, data) { @@ -172,12 +172,15 @@ class ... { } } ``` -In addition, the `Updatable` class has the following methods. +In addition, the `Updatable` class has the following methods.
+ ### `Updatable` class - + #### `addTo(property, value)` This adds the provided value to the specified property using conflict-free replicated data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values. We could improve the example above to reliably ensure the quantity is decremented even when it occurs on multiple nodes simultaneously: + ```javascript class ... { static loadAsInstance = false; @@ -187,7 +190,9 @@ class ... { } } ``` + #### `subtractFrom(property, value)` + This functions exactly the same as `addTo`, except it subtracts the value. The `Updatable` class also inherits the `getUpdatedTime` and `getExpiresAt` methods from the `RecordObject` class. @@ -216,10 +221,10 @@ The returned (promise resolves to) Subscription object is an `AsyncIterable` tha The `SubscriptionRequest` object supports the following properties (all optional): -* `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`). -* `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription. -* `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` can not be used in conjunction with `startTime`. -* `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update. +- `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`). +- `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription. +- `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`. +- `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update. ### `connect(target: RequestTarget, incomingMessages?: AsyncIterable): AsyncIterable` @@ -241,25 +246,25 @@ Returns the context for this resource. The context contains information about th The `Context` object has the following (potential) properties: -* `user` - This is the user object, which includes information about the username, role, and authorizations.
-* `transaction` - The current transaction If the current method was triggered by an HTTP request, the following properties are available: -* `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that modification should cause this timestamp to be updated. +- `user` - This is the user object, which includes information about the username, role, and authorizations. +- `transaction` - The current transaction. If the current method was triggered by an HTTP request, the following properties are available: +- `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that modification should cause this timestamp to be updated. When a resource gets a request through HTTP, the request object is the context, which has the following properties: -* `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string). -* `method` - The method of the HTTP request. -* `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`. -* `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`. -* `pathname` - This provides the path part of the URL (no querystring). -* `host` - This provides the host name of the request (from the `Host` header). -* `ip` - This provides the ip address of the client that made the request. -* `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body. -* `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`). +- `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and include the query string). +- `method` - The method of the HTTP request. +- `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`. +- `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`. +- `pathname` - This provides the path part of the URL (no querystring). +- `host` - This provides the host name of the request (from the `Host` header). +- `ip` - This provides the ip address of the client that made the request. +- `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body. +- `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body.
(Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`).

When a resource is accessed as a data source:

-* `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used fulfilled many different requests, and relying on this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.
+- `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and relying on this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.

### `operation(operationObject: Object, authorize?: boolean): Promise`

@@ -280,7 +285,7 @@ The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all ha

This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do:

```javascript
-const { MyTable, Comment } = tables;
+const { MyTable, Comment } = tables;
...
// in class:
async get() {
@@ -294,7 +299,7 @@ const { MyTable, Comment } = tables;

Type definition for `Id`:

```typescript
-Id = string|number|array
+Id = string | number | array;
```

### `get(query: Query, context?: Resource|Context)`

@@ -381,28 +386,33 @@ There are additional methods that are only available on table classes (which are

This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`) and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can include an `expiration` property that will configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries. The `options` parameter (also) supports:

-* `expiration` - Default expiration time for records in seconds.
-* `eviction` - Eviction time for records in seconds.
-* `scanInterval` - Time period for scanning the table for records to evict.
+- `expiration` - Default expiration time for records in seconds.
+- `eviction` - Eviction time for records in seconds.
+- `scanInterval` - Time period for scanning the table for records to evict.
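To make the options concrete, here is a minimal sketch of a cache table wired to a source. This is illustrative only: `MyCache`, `ExternalItems`, and the API URL are hypothetical, and it assumes the table-class source method described above is exposed as `sourcedFrom`, as in recent Harper releases:

```javascript
const { MyCache } = tables; // hypothetical cache table

// Hypothetical source resource that fetches records from an external API
class ExternalItems extends Resource {
	async get(target) {
		let response = await fetch(`https://api.example.com/items/${target.id}`);
		if (response.ok) return response.json(); // the result is cached/stored in MyCache
	}
}

// entries expire after an hour, are evicted after a day, and the eviction scan runs hourly
MyCache.sourcedFrom(ExternalItems, { expiration: 3600, eviction: 86400, scanInterval: 3600 });
```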
If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data). ### `directURLMapping` + This property can be set to force the direct URL request target to be mapped to the resource primary key. Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example: + ```javascript export class MyTable extends tables.MyTable { static directURLMapping = true; } ``` + ```http request GET /MyTable/test?foo=bar ``` + This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource. ### `getRecordCount({ exactCount: boolean })` + This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. If this was not an exact count, it will also include `estimatedRange` array with estimate range of the count. -```javascript +````javascript ### `parsePath(path, context, query) {` @@ -412,7 +422,7 @@ This is called by static methods when they are responding to a URL (from HTTP re static parsePath(path) { return path; // return the path as the id } -``` +```` ### getRecordCount: Promise<{} @@ -436,10 +446,10 @@ const { Comment } = tables; export class BlogPost extends tables.BlogPost { post(comment) { // add a comment record to the comment table, using this resource as the source for the context - Comment.put(comment, this); + Comment.put(comment, this); this.comments.push(comment.id); // add the id for the record to our array of comment ids // Both of these actions will be committed atomically as part of the same transaction - } + } } ``` @@ -453,22 +463,27 @@ The `get`/`search` methods accept a Query object that can be used to specify a q This is an array of objects that specify the conditions to use the match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties: -* `attribute`: Name of the property/attribute to match on. -* `value`: The value to match. -* `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater\_than", "greater\_than\_equal", "less\_than", "less\_than\_equal", "starts\_with", "contains", "ends\_with", "between", and "not\_equal". -* `conditions`: An array of conditions, which follows the same structure as above. -* `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`. This is optional and defaults to `and`). For example, a complex query might look like: +- `attribute`: Name of the property/attribute to match on. +- `value`: The value to match. +- `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater_than", "greater_than_equal", "less_than", "less_than_equal", "starts_with", "contains", "ends_with", "between", and "not_equal". 
+- `conditions`: An array of conditions, which follows the same structure as above.
+- `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`; this is optional and defaults to `and`).

For example, a more complex query might look like:

```javascript
-Table.search({ conditions: [
-	{ attribute: 'price', comparator: 'less_than', value: 100 },
-	{ operator: 'or', conditions: [
-		{ attribute: 'rating', comparator: 'greater_than', value: 4 },
-		{ attribute: 'featured', value: true }
-	]}
-]});
+Table.search({
+	conditions: [
+		{ attribute: 'price', comparator: 'less_than', value: 100 },
+		{
+			operator: 'or',
+			conditions: [
+				{ attribute: 'rating', comparator: 'greater_than', value: 4 },
+				{ attribute: 'featured', value: true },
+			],
+		},
+	],
+});
```

**Chained Attributes/Properties**

@@ -476,9 +491,7 @@ Table.search({ conditions: [

Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../developers/applications/defining-schemas.md) (in addition to the [schema documentation](../../developers/applications/defining-schemas.md), see the [REST documentation](../../developers/rest.md) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:

```javascript
-Product.search({ conditions: [
-	{ attribute: ['brand', 'name'], value: 'Harper' }
-]});
+Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] });
```

This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in referenced joined tables.

@@ -511,8 +524,8 @@ Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'

The select properties can also include certain special properties:

-* `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
-* `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).
+- `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
+- `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).

Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results.
For example, to return just an iterator of the `id`s of the objects:

@@ -524,9 +537,9 @@ Table.search({ select: 'id', conditions: ...})

This defines the sort order, and should be an object that can have the following properties:

-* `attributes`: The attribute to sort on.
-* `descending`: If true, will sort in descending order (optional and defaults to `false`).
-* `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
+- `attribute`: The attribute to sort on.
+- `descending`: If true, will sort in descending order (optional and defaults to `false`).
+- `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.

#### `explain`

@@ -550,8 +563,8 @@ let results = Product.search({
	offset: 20,
	limit: 10,
	select: ['id', 'name', 'price', 'rating'],
-	sort: { attribute: 'price' }
-})
+	sort: { attribute: 'price' },
+});
for await (let record of results) {
	// iterate through each record in the query results
}

@@ -559,45 +572,50 @@

`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will result in a long-lived read transaction which can degrade performance (including write performance), and may eventually be aborted.

-
### `RequestTarget`
+
The `RequestTarget` class is used to represent a URL path that can be mapped to a resource. This is used by the REST interface to map a URL path to a resource class. All REST methods are called with a `RequestTarget` as the first argument, which is used to determine which record or entry to access or modify. Methods on a `Resource` class can be called with a primary key as a string or number value as the first argument, to access or modify a record by primary key, which will work with all the default methods. The static methods will transform the primary key to a `RequestTarget` instance to call the instance methods for argument normalization. When a `RequestTarget` is constructed with a URL path (from the REST methods), the static methods will also automatically parse the path, including parsing the search string into query parameters.

Below are the properties and methods of the `RequestTarget` class:

-* `pathname` - The path of the URL relative to the resource path that matched this request. This excluded the query/search string
-* `toString()` - The full relative path and search string of the URL
-* `search` - The search/query part the target path (the part after the first `?` character)
-* `id` - The primary key of the resource, as determined by the path
-* `checkPermission` - This property is set to an object indicating that a permission check should be performed on the
+- `pathname` - The path of the URL relative to the resource path that matched this request. This excludes the query/search string.
+- `toString()` - The full relative path and search string of the URL
+- `search` - The search/query part of the target path (the part after the first `?` character)
+- `id` - The primary key of the resource, as determined by the path
+- `checkPermission` - This property is set to an object indicating that a permission check should be performed on the
This is used by the REST interface to determine if a user has permission to access the resource. The object contains: - * `action` - The type of action being performed (read/write/delete) - * `resource` - The resource being accessed - * `user` - The user requesting access + - `action` - The type of action being performed (read/write/delete) + - `resource` - The resource being accessed + - `user` - The user requesting access `RequestTarget` is subclass of `URLSearchParams`, and these methods are available for accessing and modifying the query parameters: -* `get(name: string)` - Get the value of the query parameter with the specified name -* `getAll(name: string)` - Get all the values of the query parameter with the specified name -* `set(name: string, value: string)` - Set the value of the query parameter with the specified name -* `append(name: string, value: string)` - Append the value to the query parameter with the specified name -* `delete(name: string)` - Delete the query parameter with the specified name -* `has(name: string)` - Check if the query parameter with the specified name exists + +- `get(name: string)` - Get the value of the query parameter with the specified name +- `getAll(name: string)` - Get all the values of the query parameter with the specified name +- `set(name: string, value: string)` - Set the value of the query parameter with the specified name +- `append(name: string, value: string)` - Append the value to the query parameter with the specified name +- `delete(name: string)` - Delete the query parameter with the specified name +- `has(name: string)` - Check if the query parameter with the specified name exists In addition, the `RequestTarget` class is an iterable, so you can iterate through the query parameters: -* `for (let [name, value] of target)` - Iterate through the query parameters + +- `for (let [name, value] of target)` - Iterate through the query parameters When a `RequestTarget` has query parameters using Harper's extended query syntax, the REST static methods will parse the `RequestTarget` and potentially add any of the following properties if they are present in the query: -* `conditions` - An array of conditions that will be used to filter the query results -* `limit` - The limit of the number of records to return -* `offset` - The number of records to skip before returning the results -* `sort` - The sort order of the query results -* `select` - The properties to return in the query results + +- `conditions` - An array of conditions that will be used to filter the query results +- `limit` - The limit of the number of records to return +- `offset` - The number of records to skip before returning the results +- `sort` - The sort order of the query results +- `select` - The properties to return in the query results ### `RecordObject` + The `get` method will return a `RecordObject` instance, which is an object containing all the properties of the record. Any property on the record can be directly accessed and the properties can be enumerated with standard JS capabilities like `for`-`in` and `Object.keys`. The `RecordObject` instance will also have the following methods: -* `getUpdatedTime()` - Get the last updated time (the version number) of the record -* `getExpiresAt()` - Get the expiration time of the entry, if there is one. + +- `getUpdatedTime()` - Get the last updated time (the version number) of the record +- `getExpiresAt()` - Get the expiration time of the entry, if there is one. 
### Interacting with the Resource Data Model @@ -620,7 +638,7 @@ export class CustomProduct extends Product { let record = await super.get(target); let name = record.name; // this is the name of the current product let rating = record.rating; // this is the rating of the current product - // we can't directly modify the record (it is frozen), but we can copy if we want to return a modification + // we can't directly modify the record (it is frozen), but we can copy if we want to return a modification record = { ...record, rating: 3 }; return record; } @@ -634,7 +652,7 @@ let product1 = await Product.get(1); let name = product1.name; // this is the name of the product with a primary key of 1 let rating = product1.rating; // this is the rating of the product with a primary key of 1 // if we want to update a single property: -await Product.patch(1, { rating: 3}); +await Product.patch(1, { rating: 3 }); ``` When running inside a transaction, we can use the `update` method and updates are automatically saved when a request completes: diff --git a/docs/technical-details/reference/resources/instance-binding.md b/docs/technical-details/reference/resources/instance-binding.md index 74fe89e2..16a34c74 100644 --- a/docs/technical-details/reference/resources/instance-binding.md +++ b/docs/technical-details/reference/resources/instance-binding.md @@ -1,6 +1,7 @@ # Resource Class with Resource Instance Binding behavior This document describes the legacy instance binding behavior of the Resource class. It is recommended that you use the [updated behavior of the Resource API](./resource.md) instead, but this legacy API is preserved for backwards compatibility. + ## Resource Class ```javascript @@ -15,7 +16,7 @@ export class MyExternalData extends Resource { // send the data into the external source } delete() { - // delete an entity in the external data source + // delete an entity in the external data source } subscribe(options) { // if the external data source is capable of real-time notification of changes, can subscribe @@ -41,7 +42,7 @@ export class MyTable extends tables.MyTable { super.put(data); } delete() { - super.delete(); + super.delete(); } post(data) { // providing a post handler (for HTTP POST requests) is a common way to create additional @@ -177,10 +178,10 @@ The returned (promise resolves to) Subscription object is an `AsyncIterable` tha The `SubscriptionRequest` object supports the following properties (all optional): -* `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`). -* `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription. -* `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` can not be used in conjunction with `startTime`. -* `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update. 
+- `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`).
+- `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
+- `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`.
+- `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.

### `connect(incomingMessages?: AsyncIterable, query?: Query): AsyncIterable`

@@ -226,25 +227,25 @@ Returns the context for this resource. The context contains information about th

The `Context` object has the following (potential) properties:

-* `user` - This is the user object, which includes information about the username, role, and authorizations.
-* `transaction` - The current transaction If the current method was triggered by an HTTP request, the following properties are available:
-* `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that modification should cause this timestamp to be updated.
+- `user` - This is the user object, which includes information about the username, role, and authorizations.
+- `transaction` - The current transaction. If the current method was triggered by an HTTP request, the following properties are available:
+- `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that modification should cause this timestamp to be updated.

When a resource gets a request through HTTP, the request object is the context, which has the following properties:

-* `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string).
-* `method` - The method of the HTTP request.
-* `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
-* `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
-* `pathname` - This provides the path part of the URL (no querystring).
-* `host` - This provides the host name of the request (from the `Host` header).
-* `ip` - This provides the ip address of the client that made the request.
-* `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body.
-* `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body.
(Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`).
+- `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string).
+- `method` - The method of the HTTP request.
+- `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
+- `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
+- `pathname` - This provides the path part of the URL (no querystring).
+- `host` - This provides the host name of the request (from the `Host` header).
+- `ip` - This provides the IP address of the client that made the request.
+- `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body.
+- `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`).

When a resource is accessed as a data source:

-* `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used fulfilled many different requests, and relying on this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.
+- `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and relying on this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.

### `operation(operationObject: Object, authorize?: boolean): Promise`

@@ -265,7 +266,7 @@ The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all ha

This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do:

```javascript
-const { MyTable, Comment } = tables;
+const { MyTable, Comment } = tables;
...
// in class: async get() { @@ -279,7 +280,7 @@ const { MyTable, Comment } = tables; Type definition for `Id`: ```typescript -Id = string|number|array +Id = string | number | array; ``` ### `get(query: Query, context?: Resource|Context)` @@ -372,28 +373,33 @@ There are additional methods that are only available on table classes (which are This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`) and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can include an `expiration` property that will configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries. The `options` parameter (also) supports: -* `expiration` - Default expiration time for records in seconds. -* `eviction` - Eviction time for records in seconds. -* `scanInterval` - Time period for scanning the table for records to evict. +- `expiration` - Default expiration time for records in seconds. +- `eviction` - Eviction time for records in seconds. +- `scanInterval` - Time period for scanning the table for records to evict. If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data). ### `directURLMapping` + This property can be set to force the direct URL request target to be mapped to the resource primary key. Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example: + ```javascript export class MyTable extends tables.MyTable { static directURLMapping = true; } ``` + ```http request GET /MyTable/test?foo=bar ``` + This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource. ### `getRecordCount({ exactCount: boolean })` + This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. If this was not an exact count, it will also include `estimatedRange` array with estimate range of the count. 
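For example, a quick sketch (assuming a `Product` table as in the query examples):

```javascript
const { Product } = tables;
// fast, approximate count; estimatedRange brackets the true count
let { recordCount, estimatedRange } = await Product.getRecordCount();
// slower, exact count
let { recordCount: exactCount } = await Product.getRecordCount({ exactCount: true });
```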
-```javascript +````javascript ### `parsePath(path, context, query) {` @@ -403,7 +409,7 @@ This is called by static methods when they are responding to a URL (from HTTP re static parsePath(path) { return path; // return the path as the id } -``` +```` ### getRecordCount: Promise<{} @@ -427,10 +433,10 @@ const { Comment } = tables; export class BlogPost extends tables.BlogPost { post(comment) { // add a comment record to the comment table, using this resource as the source for the context - Comment.put(comment, this); + Comment.put(comment, this); this.comments.push(comment.id); // add the id for the record to our array of comment ids // Both of these actions will be committed atomically as part of the same transaction - } + } } ``` @@ -444,22 +450,27 @@ The `get`/`search` methods accept a Query object that can be used to specify a q This is an array of objects that specify the conditions to use the match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties: -* `attribute`: Name of the property/attribute to match on. -* `value`: The value to match. -* `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater\_than", "greater\_than\_equal", "less\_than", "less\_than\_equal", "starts\_with", "contains", "ends\_with", "between", and "not\_equal". -* `conditions`: An array of conditions, which follows the same structure as above. -* `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`. This is optional and defaults to `and`). For example, a complex query might look like: +- `attribute`: Name of the property/attribute to match on. +- `value`: The value to match. +- `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater_than", "greater_than_equal", "less_than", "less_than_equal", "starts_with", "contains", "ends_with", "between", and "not_equal". +- `conditions`: An array of conditions, which follows the same structure as above. +- `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`. This is optional and defaults to `and`). For example, a complex query might look like: For example, a more complex query might look like: ```javascript -Table.search({ conditions: [ - { attribute: 'price', comparator: 'less_than', value: 100 }, - { operator: 'or', conditions: [ - { attribute: 'rating', comparator: 'greater_than', value: 4 }, - { attribute: 'featured', value: true } - ]} -]}); +Table.search({ + conditions: [ + { attribute: 'price', comparator: 'less_than', value: 100 }, + { + operator: 'or', + conditions: [ + { attribute: 'rating', comparator: 'greater_than', value: 4 }, + { attribute: 'featured', value: true }, + ], + }, + ], +}); ``` **Chained Attributes/Properties** @@ -467,9 +478,7 @@ Table.search({ conditions: [ Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../developers/applications/defining-schemas.md) (in addition to the [schema documentation](../../developers/applications/defining-schemas.md), see the [REST documentation](../../developers/rest.md) for more of overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. 
For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:

```javascript
-Product.search({ conditions: [
-	{ attribute: ['brand', 'name'], value: 'Harper' }
-]});
+Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] });
```

This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in referenced joined tables.

@@ -502,8 +511,8 @@ Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'

The select properties can also include certain special properties:

-* `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
-* `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).
+- `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
+- `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).

Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to return just an iterator of the `id`s of the objects:

@@ -515,9 +524,9 @@ Table.search({ select: 'id', conditions: ...})

This defines the sort order, and should be an object that can have the following properties:

-* `attributes`: The attribute to sort on.
-* `descending`: If true, will sort in descending order (optional and defaults to `false`).
-* `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
+- `attribute`: The attribute to sort on.
+- `descending`: If true, will sort in descending order (optional and defaults to `false`).
+- `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
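For instance, a sketch (reusing the `Product` table from the earlier examples) that sorts by price and breaks ties by descending rating:

```javascript
Product.search({
	conditions: [{ attribute: 'featured', value: true }],
	// primary sort on price; `next` resolves ties by rating, highest first
	sort: { attribute: 'price', next: { attribute: 'rating', descending: true } },
});
```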
#### `explain` @@ -541,8 +550,8 @@ let results = Product.search({ offset: 20, limit: 10, select: ['id', 'name', 'price', 'rating'], - sort: { attribute: 'price' } -}) + sort: { attribute: 'price' }, +}); for await (let record of results) { // iterate through each record in the query results } @@ -570,7 +579,7 @@ export class CustomProduct extends Product { get(query) { let name = this.name; // this is the name of the current product let rating = this.rating; // this is the rating of the current product - this.rating = 3 // we can also modify the rating for the current instance + this.rating = 3; // we can also modify the rating for the current instance // (with a get this won't be saved by default, but will be used when serialized) return super.get(query); } @@ -583,8 +592,7 @@ Likewise, we can interact with resource instances in the same way when retrievin let product1 = await Product.get(1); let name = product1.name; // this is the name of the product with a primary key of 1 let rating = product1.rating; // this is the rating of the product with a primary key of 1 -product1.rating = 3 // modify the rating for this instance (this will be saved without a call to update()) - +product1.rating = 3; // modify the rating for this instance (this will be saved without a call to update()) ``` If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definition for direct property access, but we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method: @@ -592,7 +600,7 @@ If there are additional properties on (some) products that aren't defined in the ```javascript let product1 = await Product.get(1); let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema -product1.set('newProperty', 'some value'); // we can assign any properties we want with set +product1.set('newProperty', 'some value'); // we can assign any properties we want with set ``` And likewise, we can do this in an instance method, although you will probably want to use super.get()/set() so you don't have to write extra logic to avoid recursion: @@ -601,7 +609,7 @@ And likewise, we can do this in an instance method, although you will probably w export class CustomProduct extends Product { get(query) { let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema - super.set('newProperty', 'some value'); // we can assign any properties we want with set + super.set('newProperty', 'some value'); // we can assign any properties we want with set } } ``` @@ -708,4 +716,4 @@ if (notAuthorized()) { error.statusCode = 403; throw error; } -``` \ No newline at end of file +``` diff --git a/docs/technical-details/reference/resources/migration.md b/docs/technical-details/reference/resources/migration.md index 0ec1426b..c7d66aa1 100644 --- a/docs/technical-details/reference/resources/migration.md +++ b/docs/technical-details/reference/resources/migration.md @@ -3,26 +3,29 @@ The Resource API was inspired by two major design ideas: the REST architectural design and the [Active Record pattern](https://en.wikipedia.org/wiki/Active_record_pattern) (made popular by Ruby on Rails and heavily used as a pattern in many ORMs). 
The basic design goal of the Resource API is to integrate these concepts into a single construct that can directly map RESTful methods (specifically the "uniform interface" of HTTP) to an active record data model. However, while the active record pattern has been used for _consumption_ of data, implementing methods for endpoint definitions and caching sources as a data _provider_ can be confusing and cumbersome. The updated non-instance binding Resource API is designed to make it easier and more consistent to implement a data provider and interact with records across a table, while maintaining more explicit control over what data is loaded and when. The updated Resource API is enabled on a per-class basis by setting the static `loadAsInstance` property to `false`. When this property is set to `false`, Resource instances will not be bound to a specific record. Instead, instances represent the whole table, capturing the context and current transactional state. Any records in the table can be loaded or modified from `this` instance.

There are a number of implications and different behaviors from a Resource class with `static loadAsInstance = false`:

-* The `get` method (both static and instance) will directly return the record, a frozen enumerable object with direct properties, instead of a Resource instance.
-* When instance methods are called, there will not be any record preloaded beforehand and the resource instance will not have properties mapped to a record.
-* All instance methods accept a `target`, an instance of `RequestTarget`, as the first argument, which identifies the target record or query.
-  * The `target` will have an `id` property identifying the target resource, along with target information.
-  * The `getId()` method is no longer used and will return `undefined`.
-  * The `target` will provide access to query parameters, search operators, and other directives.
-  * A `target` property of `checkPermission` indicates that a method should check the permission before of request before proceeding. The default instance methods provide the default authorization behavior.
-  * This supplants the need for `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods, which shouldn't need to be used (and don't provide the id of the target record).
-* Any data from a POST, PUT, and PATCH request will be available in the second argument. This reverses the order of the arguments to `put`, `post`, and `patch` compared to the legacy Resource API.
-* Context is tracked using asynchronous context tracking, and will automatically be available to calls to other resources. This can be disabled by setting `static explicitContext = true`, which can improve performance.
-* The `update` method will return an `Updatable` object (instead of a Resource instance), which provides properties mapped to a record, but these properties can be updated and changes will be saved when the transaction is committed.
+
+- The `get` method (both static and instance) will directly return the record, a frozen enumerable object with direct properties, instead of a Resource instance.
+- When instance methods are called, there will not be any record preloaded beforehand and the resource instance will not have properties mapped to a record.
+- All instance methods accept a `target`, an instance of `RequestTarget`, as the first argument, which identifies the target record or query.
+  - The `target` will have an `id` property identifying the target resource, along with target information.
+  - The `getId()` method is no longer used and will return `undefined`.
+  - The `target` will provide access to query parameters, search operators, and other directives.
+  - A `target` property of `checkPermission` indicates that a method should check the permission of the request before proceeding. The default instance methods provide the default authorization behavior.
+  - This supplants the need for `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods, which shouldn't need to be used (and don't provide the id of the target record).
+- Any data from a POST, PUT, and PATCH request will be available in the second argument. This reverses the order of the arguments to `put`, `post`, and `patch` compared to the legacy Resource API.
+- Context is tracked using asynchronous context tracking, and will automatically be available to calls to other resources. This can be disabled by setting `static explicitContext = true`, which can improve performance.
+- The `update` method will return an `Updatable` object (instead of a Resource instance), which provides properties mapped to a record, but these properties can be updated and changes will be saved when the transaction is committed.

The following are examples of how to migrate to the non-instance binding Resource API.

Previous code with a `get` method:
+
```javascript
export class MyData extends tables.MyData {
	async get(query) {
		let id = this.getId(); // get the id
-		if (query?.size > 0) { // check number of query parameters
+		if (query?.size > 0) {
+			// check number of query parameters
			let idWithQuery = id + query.toString(); // add query parameters
			let resource = await tables.MyData.get(idWithQuery, this); // retrieve another record
			resource.newProperty = 'value'; // assign a new value to the returned resource instance
@@ -34,14 +37,17 @@ export class MyData extends tables.MyData {
	}
}
```
+
Updated code:
+
```javascript
export class MyData extends tables.MyData {
	static loadAsInstance = false; // opt in to updated behavior
	async get(target) {
		let id = target.id; // get the id
		let record;
-		if (target.size > 0) { // check number of query parameters
+		if (target.size > 0) {
+			// check number of query parameters
			let idWithQuery = target.toString(); // this is the full target with the path query parameters
			// we can retrieve another record from this table directly with this.get/super.get or with tables.MyData.get
			record = await super.get(idWithQuery);
@@ -53,8 +59,10 @@ export class MyData extends tables.MyData {
	}
}
```
-Here is an example of the preferred approach for authorization:
+
+Here is an example of the preferred approach for authorization:
Previous code with `allow` methods:
+
```javascript
export class MyData extends tables.MyData {
	allowRead(user) {
@@ -67,6 +75,7 @@ export class MyData extends tables.MyData {
	}
}
```
+
Updated code:
```javascript
export class MyData extends tables.MyData {
	static loadAsInstance = false; // opt in to updated behavior
@@ -83,36 +92,42 @@

Here is an example of how to convert/upgrade an implementation of a `post` method:

Previous code with a `post` method:
+
```javascript
export class MyData extends tables.MyData {
	async post(data, query) {
		let resource = await tables.MyData.get(data.id, this);
-		if (resource) { // update a property
+		if (resource) {
+			// update a property
			resource.someProperty = 'value';
			// or tables.MyData.patch(data.id, { someProperty: 'value' }, this);
-		} else { // create a new record
+		} else {
+			// create a new record
MyData.create(data, this); } } } - ``` + Updated code: + ```javascript export class MyData extends tables.MyData { static loadAsInstance = false; // opt in to updated behavior // IMPORTANT: arguments are reversed: async post(target, data) { let record = await this.get(data.id); - if (record) { // update a property + if (record) { + // update a property const updatable = await this.update(data.id); // we can alternately pass a target to update updatable.someProperty = 'value'; // or this.patch(data.id, { someProperty: 'value' }); - } else { // create a new record + } else { + // create a new record this.create(data); } } } -``` \ No newline at end of file +``` diff --git a/docs/technical-details/reference/transactions.md b/docs/technical-details/reference/transactions.md index ee880dc4..313fe17f 100644 --- a/docs/technical-details/reference/transactions.md +++ b/docs/technical-details/reference/transactions.md @@ -14,7 +14,7 @@ This executes the callback in a transaction, providing a context that can be use ```javascript import { tables } from 'harperdb'; -const { MyTable } = tables; +const { MyTable } = tables; if (isMainThread) // only on main thread setInterval(async () => { let someData = await (await fetch(... some URL ...)).json(); @@ -30,7 +30,7 @@ You can provide your own context object for the transaction to attach to. If you Once the transaction callback is completed (for non-nested transaction calls), the transaction will commit, and if the callback throws an error, the transaction will abort. However, the callback is called with the `transaction` object, which also provides the following methods and property: -* `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves. -* `abort(): void` - Aborts the current transaction and resets it. -* `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database. -* `timestamp: number` - This is the timestamp associated with the current transaction. +- `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves. +- `abort(): void` - Aborts the current transaction and resets it. +- `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database. +- `timestamp: number` - This is the timestamp associated with the current transaction. diff --git a/docs/technical-details/release-notes/1.alby/1.1.0.md b/docs/technical-details/release-notes/1.alby/1.1.0.md index 7c15d913..d3b6a431 100644 --- a/docs/technical-details/release-notes/1.alby/1.1.0.md +++ b/docs/technical-details/release-notes/1.alby/1.1.0.md @@ -1,72 +1,67 @@ ### HarperDB 1.1.0, Alby Release + 4/18/2018 **Features** -* Users & Roles: +- Users & Roles: + - Limit/Assign access to all HarperDB operations + + - Limit/Assign access to schemas, tables & attributes + + - Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`) - * Limit/Assign access to all HarperDB operations +- Enhanced SQL parser + - Added extensive ANSI SQL Support. + - Added Array function, which allows for converting relational data into Object/Hierarchical data + - `Distinct_Array` Function: allows for removing duplicates in the Array function. + - Enhanced SQL Validation: Improved validation around structure of SQL, validating the schema, etc.. + - 10x performance improvement on SQL statements. 
- * Limit/Assign access to schemas, tables & attributes +- Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON. - * Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`) - -* Enhanced SQL parser - - * Added extensive ANSI SQL Support. - - * Added Array function, which allows for converting relational data into Object/Hierarchical data - - * `Distinct_Array` Function: allows for removing duplicates in the Array function. - - * Enhanced SQL Validation: Improved validation around structure of SQL, validating the schema, etc.. - - * 10x performance improvement on SQL statements. - -* Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON. +- Added upgrade function to CLI -* Added upgrade function to CLI +- Added ability to perform bulk update from CSV -* Added ability to perform bulk update from CSV +- Created landing page for HarperDB. -* Created landing page for HarperDB. +- Added CORS support to HarperDB -* Added CORS support to HarperDB - **Fixes** -* Fixed memory leak in CSV bulk loads +- Fixed memory leak in CSV bulk loads -* Corrected error when attempting to perform a `SQL DELETE` +- Corrected error when attempting to perform a `SQL DELETE` -* Added further validation to NoSQL `UPDATE` to validate schema & table exist +- Added further validation to NoSQL `UPDATE` to validate schema & table exist -* Fixed install issue occurring when part of the install path does not exist, the install would silently fail. +- Fixed install issue occurring when part of the install path does not exist, the install would silently fail. -* Fixed issues with replicated data when one of the replicas is down +- Fixed issues with replicated data when one of the replicas is down -* Removed logging of initial user’s credentials during install +- Removed logging of initial user’s credentials during install -* Can now use reserved words as aliases in SQL +- Can now use reserved words as aliases in SQL -* Removed user(s) password in results when calling `list_users` +- Removed user(s) password in results when calling `list_users` -* Corrected forwarding of operations to other nodes in a cluster +- Corrected forwarding of operations to other nodes in a cluster -* Corrected lag in schema meta-data passing to other nodes in a cluster +- Corrected lag in schema meta-data passing to other nodes in a cluster -* Drop table & schema now move the table & schema or table to the trash folder under the Database folder for later permanent deletion. +- Drop table & schema now move the table & schema or table to the trash folder under the Database folder for later permanent deletion. -* Bulk inserts no longer halt the entire operation if n records already exist, instead the return includes the hashes of records that have been skipped. +- Bulk inserts no longer halt the entire operation if n records already exist, instead the return includes the hashes of records that have been skipped. -* Added ability to accept EULA from command line +- Added ability to accept EULA from command line -* Corrected `search_by_value` not searching on the correct attribute +- Corrected `search_by_value` not searching on the correct attribute -* Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js +- Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js -* Add error handling resulting from SQL calculations. +- Add error handling resulting from SQL calculations. 
-* Standardized error responses as JSON. +- Standardized error responses as JSON. -* Corrected internal process generation to not allow more processes than machine has cores. \ No newline at end of file +- Corrected internal process generation to not allow more processes than machine has cores. diff --git a/docs/technical-details/release-notes/1.alby/1.2.0.md b/docs/technical-details/release-notes/1.alby/1.2.0.md index 5a75e558..259890cd 100644 --- a/docs/technical-details/release-notes/1.alby/1.2.0.md +++ b/docs/technical-details/release-notes/1.alby/1.2.0.md @@ -1,37 +1,37 @@ ### HarperDB 1.2.0, Alby Release + 7/10/2018 **Features** -* Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time. -* Geo: HarperDB has implemented turf.js into its SQL parser to enable geo based analytics. -* Jobs: CSV Data loads, Exports & Time to Live now all run as back ground jobs. -* Exports: Perform queries that export into JSON or CSV and save to disk or S3. - +- Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time. +- Geo: HarperDB has implemented turf.js into its SQL parser to enable geo based analytics. +- Jobs: CSV Data loads, Exports & Time to Live now all run as back ground jobs. +- Exports: Perform queries that export into JSON or CSV and save to disk or S3. **Fixes** -* Fixed issue where CSV data loads incorrectly report number of records loaded. -* Added validation to stop `BETWEEN` operations in SQL. -* Updated logging to not include internal variables in the logs. -* Cleaned up `add_role` response to not include internal variables. -* Removed old and unused dependencies. -* Build out further unit tests and integration tests. -* Fixed https to handle certificates properly. -* Improved stability of clustering & replication. -* Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response. -* Fixed issue where Blob text was not being returned from `SQL SELECT`s. -* Fixed error being returned when querying on table with no data, now correctly returns empty array. -* Improved performance in SQL when searching on exact values. -* Fixed error when ./harperdb stop is called. -* Fixed logging issue causing instability in installer. -* Fixed `read_log` operation to accept date time. -* Added permissions checking to `export_to_s3`. -* Added ability to run SQL on `SELECT` without a `FROM`. -* Fixed issue where updating a user’s password was not encrypting properly. -* Fixed `user_guide.html` to point to readme on git repo. -* Created option to have HarperDB run as a foreground process. -* Updated `user_info` to return the correct role for a user. -* Fixed issue where HarperDB would not stop if the database root was deleted. -* Corrected error message on insert if an invalid schema is provided. -* Added permissions checks for user & role operations. \ No newline at end of file +- Fixed issue where CSV data loads incorrectly report number of records loaded. +- Added validation to stop `BETWEEN` operations in SQL. +- Updated logging to not include internal variables in the logs. +- Cleaned up `add_role` response to not include internal variables. +- Removed old and unused dependencies. +- Build out further unit tests and integration tests. +- Fixed https to handle certificates properly. +- Improved stability of clustering & replication. +- Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response. 
+- Fixed issue where Blob text was not being returned from `SQL SELECT`s.
+- Fixed error being returned when querying on a table with no data; now correctly returns an empty array.
+- Improved performance in SQL when searching on exact values.
+- Fixed error when `./harperdb stop` is called.
+- Fixed logging issue causing instability in installer.
+- Fixed `read_log` operation to accept a date/time.
+- Added permissions checking to `export_to_s3`.
+- Added ability to run SQL on `SELECT` without a `FROM`.
+- Fixed issue where updating a user’s password was not encrypting properly.
+- Fixed `user_guide.html` to point to the readme in the git repo.
+- Created option to have HarperDB run as a foreground process.
+- Updated `user_info` to return the correct role for a user.
+- Fixed issue where HarperDB would not stop if the database root was deleted.
+- Corrected error message on insert if an invalid schema is provided.
+- Added permissions checks for user & role operations.
diff --git a/docs/technical-details/release-notes/1.alby/1.3.0.md b/docs/technical-details/release-notes/1.alby/1.3.0.md
index 3b4e96d6..30043b32 100644
--- a/docs/technical-details/release-notes/1.alby/1.3.0.md
+++ b/docs/technical-details/release-notes/1.alby/1.3.0.md
@@ -1,22 +1,22 @@
### HarperDB 1.3.0, Alby Release
+
11/2/2018
**Features**
-* Upgrade: Upgrade to newest version via command line.
-* SQL Support: Added `IS NULL` for SQL parser.
-* Added attribute validation to search operations.
-
+- Upgrade: Upgrade to the newest version via the command line.
+- SQL Support: Added `IS NULL` for the SQL parser.
+- Added attribute validation to search operations.
**Fixes**
-* Fixed `SELECT` calculations, i.e. `SELECT` 2+2.
-* Fixed select OR not returning expected results.
-* No longer allowing reserved words for schema and table names.
-* Corrected process interruptions from improper SQL statements.
-* Improved message handling between spawned processes that replace killed processes.
-* Enhanced error handling for updates to tables that do not exist.
-* Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes.
-* Fixed issue with new columns not being updated properly in update statements.
-* Now validating roles, tables and attributes when creating or updating roles.
-* Fixed an issue where in some cases `undefined` was being returned after dropping a role
+- Fixed `SELECT` calculations, e.g. `SELECT 2+2`.
+- Fixed `SELECT` with OR not returning expected results.
+- No longer allowing reserved words for schema and table names.
+- Corrected process interruptions from improper SQL statements.
+- Improved message handling between spawned processes that replace killed processes.
+- Enhanced error handling for updates to tables that do not exist.
+- Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes.
+- Fixed issue with new columns not being updated properly in update statements.
+- Now validating roles, tables and attributes when creating or updating roles.
+- Fixed an issue where in some cases `undefined` was being returned after dropping a role.
diff --git a/docs/technical-details/release-notes/1.alby/1.3.1.md b/docs/technical-details/release-notes/1.alby/1.3.1.md
index 0658048b..c0190f86 100644
--- a/docs/technical-details/release-notes/1.alby/1.3.1.md
+++ b/docs/technical-details/release-notes/1.alby/1.3.1.md
@@ -1,24 +1,24 @@
### HarperDB 1.3.1, Alby Release
+
2/26/2019
**Features**
-* Clustering connection direction appointment
-* Foundations for threading/multi processing
-* UUID autogen for hash attributes that were not provided
-* Added cluster status operation
-
+- Clustering connection direction appointment
+- Foundations for threading/multiprocessing
+- UUID autogen for hash attributes that were not provided
+- Added cluster status operation
**Bug Fixes and Enhancements**
-* More logging
-* Clustering communication enhancements
-* Clustering queue ordering by timestamps
-* Cluster re connection enhancements
-* Number of system core(s) detection
-* Node LTS (10.15) compatibility
-* Update/Alter users enhancements
-* General performance enhancements
-* Warning is logged if different versions of harperdb are connected via clustering
-* Fixed need to restart after user creation/alteration
-* Fixed SQL error that occurred on selecting from an empty table
\ No newline at end of file
+- More logging
+- Clustering communication enhancements
+- Clustering queue ordering by timestamps
+- Cluster reconnection enhancements
+- Number of system core(s) detection
+- Node LTS (10.15) compatibility
+- Update/Alter users enhancements
+- General performance enhancements
+- Warning is logged if different versions of HarperDB are connected via clustering
+- Fixed need to restart after user creation/alteration
+- Fixed SQL error that occurred on selecting from an empty table
diff --git a/docs/technical-details/release-notes/2.penny/2.1.1.md b/docs/technical-details/release-notes/2.penny/2.1.1.md
index a3826ec6..eeaedf40 100644
--- a/docs/technical-details/release-notes/2.penny/2.1.1.md
+++ b/docs/technical-details/release-notes/2.penny/2.1.1.md
@@ -1,22 +1,23 @@
### HarperDB 2.1.1, Penny Release
+
05/22/2020
**Highlights**
-* CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values.
-* CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to alias in `ORDER BY` statement.
-* CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values.
-* CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas.
-* CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function.
-* CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except, forward slashes and backticks. Same rules apply now for hash attribute values.
-* CORE-1003 Fixed handling of ORDER BY statements with function aliases.
-* CORE-1004 Fixed bug related to `SELECT*` on `JOIN` queries with table columns with the same name.
-* CORE-996 Fixed an issue where the `transact_to_cluster` flag is lost for CSV URL loads, fixed an issue where new attributes created in CSV bulk load do not sync to the cluster.
-* CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, cpu, disk, network.
-* CORE-993 Added new custom date functions for AlaSQL & UTC updates.
-* CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process.
-* CORE-992 HTTPS enabled by default.
-* CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage.
-* CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime.
-* CORE-975 Updated time values set by SQL Date Functions to be in epoch format.
-* CORE-974 Added date functions to `SQL SELECT` column alias functionality.
\ No newline at end of file
+- CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values.
+- CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to aliases in `ORDER BY` statements.
+- CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values.
+- CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas.
+- CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function.
+- CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. The same rules now apply for hash attribute values.
+- CORE-1003 Fixed handling of `ORDER BY` statements with function aliases.
+- CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name.
+- CORE-996 Fixed an issue where the `transact_to_cluster` flag was lost for CSV URL loads; fixed an issue where new attributes created in CSV bulk load did not sync to the cluster.
+- CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, cpu, disk, network.
+- CORE-993 Added new custom date functions for AlaSQL & UTC updates.
+- CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process.
+- CORE-992 HTTPS enabled by default.
+- CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage.
+- CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime.
+- CORE-975 Updated time values set by SQL Date Functions to be in epoch format.
+- CORE-974 Added date functions to `SQL SELECT` column alias functionality.
diff --git a/docs/technical-details/release-notes/2.penny/2.2.0.md b/docs/technical-details/release-notes/2.penny/2.2.0.md
index 038d5a6e..5888912e 100644
--- a/docs/technical-details/release-notes/2.penny/2.2.0.md
+++ b/docs/technical-details/release-notes/2.penny/2.2.0.md
@@ -1,38 +1,39 @@
### HarperDB 2.2.0, Penny Release
+
08/24/2020
**Features/Updates**
-* CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up less resources
-* CORE-1018 Adds SQL functionality for `BETWEEN` statements
-* CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation
-* CORE-1036 On create/drop table we auto create/drop the related transactions environments for the schema.table
-* CORE-1042 Built raw functions to write to a tables transaction log for insert/update/delete operations
-* CORE-1057 Implemented write transaction into lmdb create/update/delete functions
-* CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards
-* CORE-1059 Added config setting to disable transaction logging for an instance
-* CORE-1076 Adds permissions filter to describe operations
-* CORE-1043 Change clustering catchup to use the new transaction log
-* CORE-1052 Removed word "master" from source
-* CORE-1061 Added new operation called `delete_transactions_before` this will tail a transaction log for a specific schema / table
-* CORE-1040 On HarperDB startup make sure all tables have a transaction environment
-* CORE-1055 Added 2 new setting to change the server headersTimeout & keepAliveTimeout from the config file
-* CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value`
-* CORE-1043 Change clustering catchup to use the new transaction log
-* CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count
-* CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL.
-* CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions`
-* CORE-1092 Do not allow the `__createdtime__` to be updated
-* CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate
-* CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found
-* CORE-1049 Updates response message for SQL update op with no matching rows
-* CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user.
+- CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up fewer resources
+- CORE-1018 Adds SQL functionality for `BETWEEN` statements
+- CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation
+- CORE-1036 On create/drop table we auto create/drop the related transaction environments for the schema.table
+- CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations
+- CORE-1057 Implemented write transaction into lmdb create/update/delete functions
+- CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards
+- CORE-1059 Added config setting to disable transaction logging for an instance
+- CORE-1076 Adds permissions filter to describe operations
+- CORE-1043 Change clustering catchup to use the new transaction log
+- CORE-1052 Removed word "master" from source
+- CORE-1061 Added new operation called `delete_transactions_before`; this will tail a transaction log for a specific schema/table
+- CORE-1040 On HarperDB startup make sure all tables have a transaction environment
+- CORE-1055 Added 2 new settings to change the server headersTimeout & keepAliveTimeout from the config file
+- CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value` (see the sketch below)
+- CORE-1043 Change clustering catchup to use the new transaction log
+- CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count
+- CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL.
+- CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions`
+- CORE-1092 Do not allow the `__createdtime__` to be updated
+- CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate
+- CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found
+- CORE-1049 Updates response message for SQL update op with no matching rows
+- CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user.
**Bug Fixes**
-* CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query
-* CORE-1037 Fixed an issue where numbers with leading zero i.e. 00123 are converted to numbers rather than being honored as strings.
-* CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo
-* CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped.
-* CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records.
-* CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause
\ No newline at end of file
+- CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query
+- CORE-1037 Fixed an issue where numbers with a leading zero (e.g. 00123) were converted to numbers rather than being honored as strings.
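A sketch of what a `read_transaction_log` call (CORE-1044 above) can look like, using the `search_type`/`search_values` shape implied by the note; the endpoint, credentials, and values are illustrative:

```typescript
// Hedged sketch of read_transaction_log: fetch a table's transactions by timestamp range.
const auth = 'Basic ' + Buffer.from('admin:password').toString('base64');

const response = await fetch('http://localhost:9925', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: auth },
  body: JSON.stringify({
    operation: 'read_transaction_log',
    schema: 'dev',
    table: 'dog',
    search_type: 'timestamp', // or 'username' | 'hash_value', per the note above
    search_values: [1598290235769, 1598290242872], // from/to epoch milliseconds
  }),
});
console.log(await response.json()); // array of logged insert/update/delete transactions
```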
+- CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo
+- CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped.
+- CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records.
+- CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause
diff --git a/docs/technical-details/release-notes/2.penny/2.2.2.md b/docs/technical-details/release-notes/2.penny/2.2.2.md
index cb64b1e8..46e822b0 100644
--- a/docs/technical-details/release-notes/2.penny/2.2.2.md
+++ b/docs/technical-details/release-notes/2.penny/2.2.2.md
@@ -1,11 +1,12 @@
### HarperDB 2.2.2, Penny Release
+
10/27/2020
-* CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
-* CORE-1153 Fixed issue where `delete_files_before` was writing to transaction log.
-* CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created.
-* CORE-1112 Adds handling for system timestamp attributes in permissions.
-* CORE-1131 Adds better handling for checking perms on operations with action value in JSON.
-* CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions.
-* CORE-1135 Adds validation for valid keys in role API operations.
-* CORE-1073 Adds new `import_from_s3` operation to API.
+- CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
+- CORE-1153 Fixed issue where `delete_files_before` was writing to the transaction log.
+- CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created.
+- CORE-1112 Adds handling for system timestamp attributes in permissions.
+- CORE-1131 Adds better handling for checking perms on operations with an action value in JSON.
+- CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions.
+- CORE-1135 Adds validation for valid keys in role API operations.
+- CORE-1073 Adds new `import_from_s3` operation to API.
diff --git a/docs/technical-details/release-notes/2.penny/2.2.3.md b/docs/technical-details/release-notes/2.penny/2.2.3.md
index b627bb7e..aadf55d9 100644
--- a/docs/technical-details/release-notes/2.penny/2.2.3.md
+++ b/docs/technical-details/release-notes/2.penny/2.2.3.md
@@ -1,4 +1,5 @@
### HarperDB 2.2.3, Penny Release
+
11/16/2020
-* CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause into between.
\ No newline at end of file
+- CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause in between.
diff --git a/docs/technical-details/release-notes/2.penny/2.3.0.md b/docs/technical-details/release-notes/2.penny/2.3.0.md
index 9b2d3f76..e3c7a724 100644
--- a/docs/technical-details/release-notes/2.penny/2.3.0.md
+++ b/docs/technical-details/release-notes/2.penny/2.3.0.md
@@ -1,17 +1,18 @@
### HarperDB 2.3.0, Penny Release
+
12/03/2020
**Features/Updates**
-* CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information)
-* CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation
-* CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings.
-* CORE-1175 Added new internal LMDB function to copy an environment for use in future features.
-* CORE-1166 Updated packages to address security vulnerabilities.
+- CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information)
+- CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation (usage sketch below)
+- CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings.
+- CORE-1175 Added new internal LMDB function to copy an environment for use in future features.
+- CORE-1166 Updated packages to address security vulnerabilities.
**Bug Fixes**
-* CORE-1195 Modified `drop_attribute` to drop after data cleanse completes.
-* CORE-1149 Fix SQL bug regarding self joins and updates alasql to 0.6.5 release.
-* CORE-1168 Fix inconsistent invalid schema/table errors.
-* CORE-1162 Fix bug which caused `delete_files_before` to cause tables to grow in size due to an open cursor issue.
\ No newline at end of file
+- CORE-1195 Modified `drop_attribute` to drop after the data cleanse completes.
+- CORE-1149 Fix SQL bug regarding self joins; updates alasql to the 0.6.5 release.
+- CORE-1168 Fix inconsistent invalid schema/table errors.
+- CORE-1162 Fix bug where `delete_files_before` caused tables to grow in size due to an open cursor issue.
diff --git a/docs/technical-details/release-notes/2.penny/2.3.1.md b/docs/technical-details/release-notes/2.penny/2.3.1.md
index d3af8dd6..5877b3ba 100644
--- a/docs/technical-details/release-notes/2.penny/2.3.1.md
+++ b/docs/technical-details/release-notes/2.penny/2.3.1.md
@@ -1,7 +1,8 @@
### HarperDB 2.3.1, Penny Release
+
1/29/2021
**Bug Fixes**
-* CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`.
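For the `upsert` operation noted above, the call takes a records array: rows that carry an existing hash value are updated, rows without one are inserted. A hedged sketch (endpoint, credentials, schema, and data are illustrative):

```typescript
// Hedged sketch of upsert: id 8 updates an existing record; the second row
// has no hash value and is inserted with a generated one.
const auth = 'Basic ' + Buffer.from('admin:password').toString('base64');

const response = await fetch('http://localhost:9925', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: auth },
  body: JSON.stringify({
    operation: 'upsert',
    schema: 'dev',
    table: 'dog',
    records: [
      { id: 8, name: 'Harper', breed: 'Mutt' },
      { name: 'Penny', breed: 'Mutt' },
    ],
  }),
});
console.log(await response.json()); // lists upserted hashes, including any generated ids
```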
+- CORE-1219 Increased maximum table attributes from 1000 to 10000.
diff --git a/docs/technical-details/release-notes/3.monkey/3.0.0.md b/docs/technical-details/release-notes/3.monkey/3.0.0.md
index 3102039f..43376fc8 100644
--- a/docs/technical-details/release-notes/3.monkey/3.0.0.md
+++ b/docs/technical-details/release-notes/3.monkey/3.0.0.md
@@ -1,26 +1,27 @@
### HarperDB 3.0, Monkey Release
+
5/18/2021
**Features/Updates**
-* CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation.
-* CORE-1304 Upgrade to Node 12.22.1.
-* CORE-1235 Adds new upgrade/install functionality.
-* CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance.
-* CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`.
-* CORE-1243 Change `HTTPS_ON` settings value to false by default.
-* CORE-1189 Implement fastify web server, resulting in improved performance.
-* CORE-1221 Update user API to use role name instead of role id.
-* CORE-1225 Updated dependencies to eliminate npm security warnings.
-* CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality.
+- CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation (see the sketch below).
+- CORE-1304 Upgrade to Node 12.22.1.
+- CORE-1235 Adds new upgrade/install functionality.
+- CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance.
+- CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`.
+- CORE-1243 Change `HTTPS_ON` settings value to false by default.
+- CORE-1189 Implement fastify web server, resulting in improved performance.
+- CORE-1221 Update user API to use role name instead of role id.
+- CORE-1225 Updated dependencies to eliminate npm security warnings.
+- CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality.
**Bug Fixes**
-* CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB.
-* CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log.
-* CORE-1285 Update warning log when trying to create an attribute that already exists.
-* CORE-1254 Added logic to manage data collisions in clustering.
-* CORE-1212 Add pre-check to `drop_user` that returns error if user doesn't exist.
-* CORE-1114 Update response code and message from `add_user` when user already exists.
-* CORE-1111 Update response from `create_attribute` to match the create schema/table response.
-* CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update.
\ No newline at end of file
+- CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB.
+- CORE-1288 Fix bug with drop table/schema that was causing an 'env required' error log.
+- CORE-1285 Update warning log when trying to create an attribute that already exists.
+- CORE-1254 Added logic to manage data collisions in clustering.
+- CORE-1212 Add pre-check to `drop_user` that returns an error if the user doesn't exist.
+- CORE-1114 Update response code and message from `add_user` when the user already exists.
+- CORE-1111 Update response from `create_attribute` to match the create schema/table response.
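The `search_by_conditions` operation introduced above composes multiple indexed conditions under a single `and`/`or` operator. A hedged sketch of the request body (attribute names and values are illustrative):

```typescript
// Hedged sketch of search_by_conditions: AND two conditions over indexed attributes.
const auth = 'Basic ' + Buffer.from('admin:password').toString('base64');

const response = await fetch('http://localhost:9925', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: auth },
  body: JSON.stringify({
    operation: 'search_by_conditions',
    schema: 'dev',
    table: 'dog',
    operator: 'and', // all conditions below must match; 'or' matches any
    get_attributes: ['*'],
    conditions: [
      { search_attribute: 'age', search_type: 'between', search_value: [2, 5] },
      { search_attribute: 'breed', search_type: 'equals', search_value: 'Husky' },
    ],
  }),
});
console.log(await response.json()); // matching records
```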
+- CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update.
diff --git a/docs/technical-details/release-notes/3.monkey/3.1.0.md b/docs/technical-details/release-notes/3.monkey/3.1.0.md
index b329db7b..4a41daf4 100644
--- a/docs/technical-details/release-notes/3.monkey/3.1.0.md
+++ b/docs/technical-details/release-notes/3.monkey/3.1.0.md
@@ -1,18 +1,19 @@
### HarperDB 3.1.0, Monkey Release
+
8/24/2021
**Features/Updates**
-* CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions
-* CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions
-* CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management
-* CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments
-* CORE-1310 Updated licensing functionality
-* CORE-1301 Updated validation for performance improvement
-* CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds
-* CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES`
+- CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions
+- CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions
+- CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management
+- CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments
+- CORE-1310 Updated licensing functionality
+- CORE-1301 Updated validation for performance improvement
+- CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds
+- CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES`
**Bug Fixes**
-* CORE-1315 Corrected issue in HarperDB restart scenario
-* CORE-1370 Update some of the validation error handlers so that they don't log full stack
\ No newline at end of file
+- CORE-1315 Corrected issue in HarperDB restart scenario
+- CORE-1370 Update some of the validation error handlers so that they don't log the full stack trace
diff --git a/docs/technical-details/release-notes/3.monkey/3.1.1.md b/docs/technical-details/release-notes/3.monkey/3.1.1.md
index 27b39b5f..4733f103 100644
--- a/docs/technical-details/release-notes/3.monkey/3.1.1.md
+++ b/docs/technical-details/release-notes/3.monkey/3.1.1.md
@@ -1,13 +1,14 @@
### HarperDB 3.1.1, Monkey Release
+
9/23/2021
**Features/Updates**
-* CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart
-* CORE-1395 Create a setting which will allow to enable the local Studio to be served from an instance of HarperDB
-* CORE-1397 Update the stock 404 response to not return the request URL
-* General updates to optimize Docker container
+- CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart
+- CORE-1395 Create a setting which allows enabling the local Studio to be served from an instance of HarperDB
+- CORE-1397 Update the stock 404 response to not return the request URL
+- General updates to optimize Docker container
**Bug Fixes**
-* CORE-1399 Added fixes for complex SQL alias issues
\ No newline at end of file
+- CORE-1399 Added fixes for complex SQL alias issues
diff --git a/docs/technical-details/release-notes/3.monkey/3.1.2.md b/docs/technical-details/release-notes/3.monkey/3.1.2.md
index a48d8ce8..d07d9993 100644
--- a/docs/technical-details/release-notes/3.monkey/3.1.2.md
+++ b/docs/technical-details/release-notes/3.monkey/3.1.2.md
@@ -1,10 +1,11 @@
### HarperDB 3.1.2, Monkey Release
+
10/21/2021
**Features/Updates**
-* Updated the installation ASCII art to reflect the new HarperDB logo
+- Updated the installation ASCII art to reflect the new HarperDB logo
**Bug Fixes**
-* CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number causing tables to behave unexpectedly
\ No newline at end of file
+- CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number, causing tables to behave unexpectedly
diff --git a/docs/technical-details/release-notes/3.monkey/3.1.3.md b/docs/technical-details/release-notes/3.monkey/3.1.3.md
index b7ca8284..72c1ba84 100644
--- a/docs/technical-details/release-notes/3.monkey/3.1.3.md
+++ b/docs/technical-details/release-notes/3.monkey/3.1.3.md
@@ -1,6 +1,7 @@
### HarperDB 3.1.3, Monkey Release
+
1/14/2022
**Bug Fixes**
-* CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return
\ No newline at end of file
+- CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return
diff --git a/docs/technical-details/release-notes/3.monkey/3.1.4.md b/docs/technical-details/release-notes/3.monkey/3.1.4.md
index 74989896..f4f94715 100644
--- a/docs/technical-details/release-notes/3.monkey/3.1.4.md
+++ b/docs/technical-details/release-notes/3.monkey/3.1.4.md
@@ -1,6 +1,7 @@
### HarperDB 3.1.4, Monkey Release
+
2/24/2022
**Features/Updates**
-* CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe.
\ No newline at end of file
+- CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe.
diff --git a/docs/technical-details/release-notes/3.monkey/3.1.5.md b/docs/technical-details/release-notes/3.monkey/3.1.5.md
index 5ae595fa..9873984a 100644
--- a/docs/technical-details/release-notes/3.monkey/3.1.5.md
+++ b/docs/technical-details/release-notes/3.monkey/3.1.5.md
@@ -1,6 +1,7 @@
### HarperDB 3.1.5, Monkey Release
+
3/4/2022
**Features/Updates**
-* CORE-1498 Fixed incorrect autocasting of string that start with "0." that tries to convert to number but instead returns NaN.
\ No newline at end of file
+- CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which tried to convert to a number but instead returned NaN.
diff --git a/docs/technical-details/release-notes/3.monkey/3.2.0.md b/docs/technical-details/release-notes/3.monkey/3.2.0.md
index d69e3856..a76c0ec2 100644
--- a/docs/technical-details/release-notes/3.monkey/3.2.0.md
+++ b/docs/technical-details/release-notes/3.monkey/3.2.0.md
@@ -1,8 +1,9 @@
### HarperDB 3.2.0, Monkey Release
+
3/25/2022
**Features/Updates**
-* CORE-1391 Bug fix related to orphaned HarperDB background processes.
-* CORE-1509 Updated node version check, updated Node.js version, updated project dependencies.
-* CORE-1518 Remove final call from logger.
\ No newline at end of file
+- CORE-1391 Bug fix related to orphaned HarperDB background processes.
+- CORE-1509 Updated node version check, updated Node.js version, updated project dependencies.
+- CORE-1518 Remove final call from logger.
diff --git a/docs/technical-details/release-notes/3.monkey/3.2.1.md b/docs/technical-details/release-notes/3.monkey/3.2.1.md
index 4fbdeab1..b2518e56 100644
--- a/docs/technical-details/release-notes/3.monkey/3.2.1.md
+++ b/docs/technical-details/release-notes/3.monkey/3.2.1.md
@@ -1,6 +1,7 @@
### HarperDB 3.2.1, Monkey Release
+
6/1/2022
**Features/Updates**
-* CORE-1573 Added logic to track the pid of the foreground process if running in foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called.
\ No newline at end of file
+- CORE-1573 Added logic to track the pid of the foreground process if running in the foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called.
diff --git a/docs/technical-details/release-notes/3.monkey/3.3.0.md b/docs/technical-details/release-notes/3.monkey/3.3.0.md
index 2eb7e4a9..b3769195 100644
--- a/docs/technical-details/release-notes/3.monkey/3.3.0.md
+++ b/docs/technical-details/release-notes/3.monkey/3.3.0.md
@@ -1,7 +1,7 @@
### HarperDB 3.3.0 - Monkey
-* CORE-1595 Added new role type `structure_user`, this enables non-superusers to be able to create/drop schema/table/attribute.
-* CORE-1501 Improved performance for drop_table.
-* CORE-1599 Added two new operations for custom functions `install_node_modules` & `audit_node_modules`.
-* CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`.
-* CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, Python are no longer required when installing on these platforms.
+- CORE-1595 Added new role type `structure_user`; this enables non-superusers to create/drop schema/table/attribute.
+- CORE-1501 Improved performance for `drop_table`.
+- CORE-1599 Added two new operations for custom functions: `install_node_modules` & `audit_node_modules` (see the sketch below).
+- CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`.
+- CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, and Python are no longer required when installing on these platforms.
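The `skip_node_modules`/`install_node_modules` pairing described above supports a package, deploy, then install flow. A hedged sketch of that sequence (host names and the project name are placeholders, and the exact response field carried into the deploy call, here `payload`, is an assumption):

```typescript
// Hedged sketch: package a Custom Functions project without its dependencies,
// deploy it to another node, then restore node_modules there.
const auth = 'Basic ' + Buffer.from('admin:password').toString('base64');

async function callOp(url: string, body: object) {
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: auth },
    body: JSON.stringify(body),
  });
  return res.json();
}

// 1. Package locally, skipping node_modules for a smaller payload.
const pkg = await callOp('http://source-node:9925', {
  operation: 'package_custom_function_project',
  project: 'my-api',
  skip_node_modules: true,
});

// 2. Deploy the packaged payload to the target node.
await callOp('http://target-node:9925', {
  operation: 'deploy_custom_function_project',
  project: 'my-api',
  payload: pkg.payload, // field name assumed from the packaging response
});

// 3. Reinstall the skipped dependencies on the target node.
await callOp('http://target-node:9925', {
  operation: 'install_node_modules',
  projects: ['my-api'],
});
```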
diff --git a/docs/technical-details/release-notes/4.tucker/4.0.0.md b/docs/technical-details/release-notes/4.tucker/4.0.0.md
index cfe07285..6c8f5f9c 100644
--- a/docs/technical-details/release-notes/4.tucker/4.0.0.md
+++ b/docs/technical-details/release-notes/4.tucker/4.0.0.md
@@ -1,119 +1,126 @@
### HarperDB 4.0.0, Tucker Release
+
11/2/2022
**Networking & Data Replication (Clustering)**
-The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise grade connective technology responsible for addressing, discovery and exchanging of messages that drive the common patterns in distributed systems.
-* CORE-1464, CORE-1470, : Remove SocketCluster dependencies and all code related to them.
-* CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE_1665, CORE-1710, CORE-1801, CORE-1865 :Add nats-`server` code as dependency, on install of HarperDB download nats-`server` is possible else fallback to building from source code.
-* CORE-1593, CORE-1761: Add `nats.js` as project dependency.
-* CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration.
-* CORE-1467, CORE-1508: Launch and manage NATS servers with PM2.
-* CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions.
-* CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs.
-* CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS.
-* CORE-1504: Update existing hooks to save transactions to work with NATS.
-* CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now manage dynamically sourcing of table level transaction streams between nodes and work queues.
-* CORE-1522: Create `NATSReplyService` process which handles the receiving NATS based requests from remote instances and sending back appropriate responses.
-* CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation.
-* CORE-1611: Update pre-existing transaction log operations to be audit log operations.
-* CORE-1541, CORE-1612, CORE-1613: Create translation log operations which interface with streams.
-* CORE-1668: Update NATS serialization / deserialization to use MessagePack.
-* CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`.
-* CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & just clustering restart call the NATS server with a reload directive to maintain full uptime while servers refresh.
-* CORE-1474:HarperDB install adds clustering folder structure.
-* CORE-1530: Post `drop_table` HarperDB purges the related transaction stream.
-* CORE-1567: Set NATS config to always use TLS.
-* CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate.
-* CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`, this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB.
-* CORE-1535: Create work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order.
-* CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`.
-* CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exact once delivery.
-* CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not.
-* CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time to start synchronizing transactions.
-* CORE-1785: Optimizations and bug fixes in regards to sourcing data from remote instances on HarperDB.
-* CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together.
-* CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh.
-* CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh.
-* CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to new hub server port.
-* CORE-1609: Allow `remove_node` to be called when the other node cannot be reached.
-* CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid concurrent nats source update bug.
-* CORE-1848: Update stream configs if the node name has been changed in the YAML configuration.
-* CORE-1873: Update `add_node` and `update_node` so that it auto-creates schema/table on both local and remote node respectively
+The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise-grade connective technology responsible for addressing, discovery, and exchange of messages that drive the common patterns in distributed systems.
+- CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them.
+- CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` as a dependency; on install of HarperDB a `nats-server` binary is downloaded when possible, else it falls back to building from source code.
+- CORE-1593, CORE-1761: Add `nats.js` as project dependency.
+- CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration.
+- CORE-1467, CORE-1508: Launch and manage NATS servers with PM2.
+- CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions.
+- CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs.
+- CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS.
+- CORE-1504: Update existing hooks to save transactions to work with NATS.
+- CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now dynamically manage sourcing of table-level transaction streams between nodes and work queues.
+- CORE-1522: Create `NATSReplyService` process which handles receiving NATS-based requests from remote instances and sending back appropriate responses.
+- CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation.
+- CORE-1611: Update pre-existing transaction log operations to be audit log operations.
+- CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams.
+- CORE-1668: Update NATS serialization/deserialization to use MessagePack.
+- CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`.
+- CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & just a clustering restart call the NATS server with a reload directive to maintain full uptime while servers refresh.
+- CORE-1474: HarperDB install adds clustering folder structure.
+- CORE-1530: Post `drop_table` HarperDB purges the related transaction stream.
+- CORE-1567: Set NATS config to always use TLS.
+- CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate.
+- CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`; this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB.
+- CORE-1535: Create work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order.
+- CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`.
+- CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery.
+- CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not.
+- CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time to start synchronizing transactions.
+- CORE-1785: Optimizations and bug fixes in regards to sourcing data from remote instances of HarperDB.
+- CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together (see the sketch below).
+- CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh.
+- CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh.
+- CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to new hub server port.
+- CORE-1609: Allow `remove_node` to be called when the other node cannot be reached.
+- CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid a concurrent NATS source update bug.
+- CORE-1848: Update stream configs if the node name has been changed in the YAML configuration.
+- CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on both the local and remote node respectively.
**Data Storage**
We have made improvements to how we store, index, and retrieve data.
-* CORE-1619: Enabled new concurrent flushing technology for improved write performance.
-* CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions.
-* CORE-1652: Encode the values of secondary indices more efficiently for faster access.
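The three route operations above manage the list of nodes this instance meshes with. A hedged sketch of `set_cluster_routes` (host/port values are illustrative, and the `server: 'hub'` attribute follows the hub/leaf split described in this release):

```typescript
// Hedged sketch: point this instance's hub server at another node's clustering port.
const auth = 'Basic ' + Buffer.from('admin:password').toString('base64');

const response = await fetch('http://localhost:9925', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: auth },
  body: JSON.stringify({
    operation: 'set_cluster_routes',
    server: 'hub',
    routes: [{ host: '192.168.1.20', port: 9932 }],
  }),
});
console.log(await response.json());
// get_cluster_routes and delete_cluster_routes take the same general shape.
```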
-* CORE-1670: Store updated timestamp in `lmdb.js`' version property.
-* CORE-1651: Enabled multiple value indexing of array values which allows for the ability to search on specific elements in an array more efficiently.
-* CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in separate blob index. Now they are segmented and delimited in the same index to increase search performance.
-* Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput.
-* CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values.
-* CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with get_attributes) without incurring the cost of decoding the entire object.
-* CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data.
-* CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates.
-* CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements.
-* CORE-1753: Removed old `node-lmdb` dependency.
-* CORE-1787: Freeze objects returned from queries.
-* CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync.
+
+- CORE-1619: Enabled new concurrent flushing technology for improved write performance.
+- CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions.
+- CORE-1652: Encode the values of secondary indices more efficiently for faster access.
+- CORE-1670: Store updated timestamp in `lmdb.js`' version property.
+- CORE-1651: Enabled multiple-value indexing of array values, which allows searching on specific elements in an array more efficiently.
+- CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance.
+- Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput.
+- CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values.
+- CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with `get_attributes`) without incurring the cost of decoding the entire object.
+- CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data.
+- CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates.
+- CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements.
+- CORE-1753: Removed old `node-lmdb` dependency.
+- CORE-1787: Freeze objects returned from queries.
+- CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync.
**Logging**
HarperDB has increased logging specificity by breaking out logs based on components logging. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more.
-* CORE-1497: Remove `pino` and `winston` dependencies.
-* CORE-1426: All logging is output via `stdout` and `stderr`, our default logging is then picked up by PM2 which handles writing out to file.
-* CORE-1431: Improved `read_log` operation validation.
-* CORE-1433, CORE-1463: Added log rotation.
-* CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration.
-* CORE-1436: Upgrade to 4.0 updates internals for logging changes.
-* CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests.
-* CORE-1691: Convert non-PM2 managed log file writes to use Node.js `fs.appendFileSync` function.
+
+- CORE-1497: Remove `pino` and `winston` dependencies.
+- CORE-1426: All logging is output via `stdout` and `stderr`; our default logging is then picked up by PM2, which handles writing out to file.
+- CORE-1431: Improved `read_log` operation validation.
+- CORE-1433, CORE-1463: Added log rotation.
+- CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration.
+- CORE-1436: Upgrade to 4.0 updates internals for logging changes.
+- CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests.
+- CORE-1691: Convert non-PM2 managed log file writes to use Node.js `fs.appendFileSync` function.
**Configuration**
HarperDB has updated its configuration from a properties file to YAML.
-* CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML.
-* CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file.
-* Installer has updated prompts to reflect YAML settings.
-* CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`.
-* CORE-1461, CORE-1462, CORE-1483: Unit test improvements.
-* CORE-1492: Improvements to get_configuration and set_configuration operations.
-* CORE-1503: Modify HarperDB configuration for more granular certificate definition.
-* CORE-1591: Update `routes` IP param to `host` and to `leaf` config in `harperdb.conf`
-* CORE-1519: Fix issue when switching between old and new versions of HarperDB we are getting the config parameter is undefined error on npm install.
+
+- CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML.
+- CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file.
+- Installer has updated prompts to reflect YAML settings.
+- CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`.
+- CORE-1461, CORE-1462, CORE-1483: Unit test improvements.
+- CORE-1492: Improvements to `get_configuration` and `set_configuration` operations.
+- CORE-1503: Modify HarperDB configuration for more granular certificate definition.
+- CORE-1591: Update `routes` IP param to `host` and move it to the `leaf` config in `harperdb.conf`.
+- CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on npm install.
**Broad NodeJS and Platform Support**
+
+- CORE-1624: HarperDB can now run on multiple versions of NodeJS, from v14 to v19. We primarily test on v18, so that is the preferred version.
**Windows 10 and 11**
+
+- CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL. Windows is only intended for evaluation and development purposes, not for production workloads.
**Extra Changes and Bug Fixes**
+
+- CORE-1520: Refactor installer to remove all waterfall code and update to use Promises.
+- CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb.
+- CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned.
+- CORE-1626: Update docker file to accommodate new `harperdb.conf` file.
+- CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for CI/CD pipeline and integration tests.
+- CORE-1661: Fixed issue where old boot properties file caused an error when attempting to install 4.0.0.
+- CORE-1697, CORE-1814, CORE-1855: Upgrade fastify dependency to new major version 4.
+- CORE-1629: Jobs are now running as processes managed by the PM2 daemon.
+- CORE-1733: Update LICENSE to reflect our EULA on our site.
+- CORE-1606: Enable Custom Functions by default.
+- CORE-1714: Include pre-built binaries for most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64).
+- CORE-1628: Fix issue where setting the license through an environment variable was not working.
+- CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements.
+- CORE-1706: Add support for encoding HTTP responses with MessagePack.
+- CORE-1709: Improve the way lmdb.js dependencies are installed.
+- CORE-1758: Remove/update unnecessary HTTP headers.
+- CORE-1756: On `npm install` and `harperdb install` change the node version check from an error to a warning if the installed Node.js version does not match our preferred version.
+- CORE-1791: Optimizations to authenticated user caching.
+- CORE-1794: Update README to discuss Windows support & Node.js versions.
+- CORE-1837: Fix issue where Custom Function directory was not being created on install.
+- CORE-1742: Add more validation to audit log: check schema/table exists and log is enabled.
+- CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`.
+- CORE-1864: Fix to semver checks on upgrade.
+- CORE-1850: Fix issue where a `cluster_user` type role could not be altered.
diff --git a/docs/technical-details/release-notes/4.tucker/4.0.1.md b/docs/technical-details/release-notes/4.tucker/4.0.1.md
index 02cdca81..ba7e3d70 100644
--- a/docs/technical-details/release-notes/4.tucker/4.0.1.md
+++ b/docs/technical-details/release-notes/4.tucker/4.0.1.md
@@ -1,7 +1,8 @@
### HarperDB 4.0.1, Tucker Release
+
01/20/2023
**Bug Fixes**
-* CORE-1992 Local studio was not loading because the path got mangled in the build.
-* CORE-2001 Fixed deploy_custom_function_project after node update broke it.
+- CORE-1992 Local studio was not loading because the path got mangled in the build.
+- CORE-2001 Fixed `deploy_custom_function_project` after a node update broke it.
diff --git a/docs/technical-details/release-notes/4.tucker/4.0.2.md b/docs/technical-details/release-notes/4.tucker/4.0.2.md
index 692094f5..34e86018 100644
--- a/docs/technical-details/release-notes/4.tucker/4.0.2.md
+++ b/docs/technical-details/release-notes/4.tucker/4.0.2.md
@@ -1,7 +1,8 @@
### HarperDB 4.0.2, Tucker Release
+
01/24/2023
**Bug Fixes**
-* CORE-2003 Fix bug where if machine had one core thread config would default to zero.
-* Update to lmdb 2.7.3 and msgpackr 1.7.0
+- CORE-2003 Fix bug where, if the machine had one core, the thread config would default to zero.
+- Update to lmdb 2.7.3 and msgpackr 1.7.0 diff --git a/docs/technical-details/release-notes/4.tucker/4.0.3.md b/docs/technical-details/release-notes/4.tucker/4.0.3.md index ec0e055c..cd987f0b 100644 --- a/docs/technical-details/release-notes/4.tucker/4.0.3.md +++ b/docs/technical-details/release-notes/4.tucker/4.0.3.md @@ -1,6 +1,7 @@ ### HarperDB 4.0.3, Tucker Release + 01/26/2023 **Bug Fixes** -* CORE-2007 Add update nodes 4.0.0 launch script to build script to fix clustering upgrade. +- CORE-2007 Add the update-nodes 4.0.0 launch script to the build script to fix the clustering upgrade. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.4.md b/docs/technical-details/release-notes/4.tucker/4.0.4.md index b8bad686..4dea9ee1 100644 --- a/docs/technical-details/release-notes/4.tucker/4.0.4.md +++ b/docs/technical-details/release-notes/4.tucker/4.0.4.md @@ -1,6 +1,7 @@ ### HarperDB 4.0.4, Tucker Release + 01/27/2023 **Bug Fixes** -* CORE-2009 Fixed bug where add node was not being called when upgrading clustering. \ No newline at end of file +- CORE-2009 Fixed bug where add node was not being called when upgrading clustering. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.5.md b/docs/technical-details/release-notes/4.tucker/4.0.5.md index 9abdab0c..83a2a2c5 100644 --- a/docs/technical-details/release-notes/4.tucker/4.0.5.md +++ b/docs/technical-details/release-notes/4.tucker/4.0.5.md @@ -1,9 +1,9 @@ ### HarperDB 4.0.5, Tucker Release + 02/15/2023 **Bug Fixes** -* CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to upgrade to determine if new certificates should be created or existing certificates should be kept/used. -* Fix the way NATS connections are honored in a local environment. -* Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. - +- CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to the upgrade process to determine whether new certificates should be created or existing certificates should be kept/used. +- Fix the way NATS connections are honored in a local environment. +- Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.6.md b/docs/technical-details/release-notes/4.tucker/4.0.6.md index b3859502..bb696c3b 100644 --- a/docs/technical-details/release-notes/4.tucker/4.0.6.md +++ b/docs/technical-details/release-notes/4.tucker/4.0.6.md @@ -1,6 +1,7 @@ ### HarperDB 4.0.6, Tucker Release + 03/09/2023 **Bug Fixes** -* Fixed a data serialization error that occurs when a large number of different record structures are persisted in a single table. +- Fixed a data serialization error that occurs when a large number of different record structures are persisted in a single table. 
diff --git a/docs/technical-details/release-notes/4.tucker/4.0.7.md b/docs/technical-details/release-notes/4.tucker/4.0.7.md index 4381d747..dfd135bd 100644 --- a/docs/technical-details/release-notes/4.tucker/4.0.7.md +++ b/docs/technical-details/release-notes/4.tucker/4.0.7.md @@ -1,6 +1,7 @@ ### HarperDB 4.0.7, Tucker Release + 03/10/2023 **Bug Fixes** -* Update lmdb.js dependency \ No newline at end of file +- Update lmdb.js dependency diff --git a/docs/technical-details/release-notes/4.tucker/4.1.0.md b/docs/technical-details/release-notes/4.tucker/4.1.0.md index d2424c79..fde09ef7 100644 --- a/docs/technical-details/release-notes/4.tucker/4.1.0.md +++ b/docs/technical-details/release-notes/4.tucker/4.1.0.md @@ -20,39 +20,39 @@ Updates to S3 import and export mean that these operations now require the bucke Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3`: -* A new attribute `region` will need to be supplied -* The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`. +- A new attribute `region` will need to be supplied +- The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key` (an example request body is sketched below, after the 4.1.x notes). Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard Unix tooling for interacting with the process and is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`. Internal Tickets completed: -* CORE-609 - Ensure that attribute names are always added to global schema as Strings -* CORE-1549 - Remove fastify-static code from Custom Functions server which auto serves content from "static" folder -* CORE-1655 - Iterator based queries -* CORE-1764 - Fix issue where describe\_all operation returns an empty object for non super-users if schema(s) do not yet have table(s) -* CORE-1854 - Switch to using worker threads instead of processes for handling concurrency -* CORE-1877 - Extend the csv\_url\_load operation to allow for additional headers to be passed to the remote server when the csv is being downloaded -* CORE-1893 - Add last updated timestamp to describe operations -* CORE-1896 - Fix issue where Select \* from system.hdb\_info returns wrong HDB version number after Instance Upgrade -* CORE-1904 - Fix issue when executing GEOJSON query in SQL -* CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams -* CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. -* CORE-1655 - Streaming binary format serialization -* CORE-1943 - Add configuration option to set mount point for audit tables -* CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. -* CORE-1963 - Update logging for better readability, reduced duplication, and request context information. -* CORE-1968 - In server\nats\natsIngestService.js remove the js\_msg.working(); line to improve performance. -* CORE-1976 - Fix error when calling describe\_table operation with no schema or table defined in payload. 
-* CORE-1983 - Fix issue where create\_attribute operation does not validate request for required attributes -* CORE-2015 - Remove PM2 logs that get logged in console when starting HDB -* CORE-2048 - systemd script for 4.1 -* CORE-2052 - Include thread information in system\_information for visibility of threads -* CORE-2061 - Add a better error msg when clustering is enabled without a cluster user set -* CORE-2068 - Create new log rotate logic since pm2 log-rotate no longer used -* CORE-2072 - Update to Node 18.15.0 -* CORE-2090 - Upgrade Testing from v4.0.x and v3.x to v4.1. -* CORE-2091 - Run the performance tests -* CORE-2092 - Allow for automatic patch version updates of certain packages -* CORE-2109 - Add verify option to clustering TLS configuration -* CORE-2111 - Update AWS SDK to v3 +- CORE-609 - Ensure that attribute names are always added to global schema as Strings +- CORE-1549 - Remove fastify-static code from Custom Functions server which auto serves content from "static" folder +- CORE-1655 - Iterator based queries +- CORE-1764 - Fix issue where describe_all operation returns an empty object for non super-users if schema(s) do not yet have table(s) +- CORE-1854 - Switch to using worker threads instead of processes for handling concurrency +- CORE-1877 - Extend the csv_url_load operation to allow for additional headers to be passed to the remote server when the csv is being downloaded +- CORE-1893 - Add last updated timestamp to describe operations +- CORE-1896 - Fix issue where `SELECT * FROM system.hdb_info` returns wrong HDB version number after instance upgrade +- CORE-1904 - Fix issue when executing GEOJSON query in SQL +- CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams +- CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. +- CORE-1655 - Streaming binary format serialization +- CORE-1943 - Add configuration option to set mount point for audit tables +- CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. +- CORE-1963 - Update logging for better readability, reduced duplication, and request context information. +- CORE-1968 - In `server/nats/natsIngestService.js`, remove the `js_msg.working();` line to improve performance. +- CORE-1976 - Fix error when calling describe_table operation with no schema or table defined in payload. +- CORE-1983 - Fix issue where create_attribute operation does not validate request for required attributes +- CORE-2015 - Remove PM2 logs that get logged to the console when starting HDB +- CORE-2048 - Add a systemd script for 4.1 +- CORE-2052 - Include thread information in system_information for visibility of threads +- CORE-2061 - Add a better error message when clustering is enabled without a cluster user set +- CORE-2068 - Create new log rotate logic since pm2 log-rotate is no longer used +- CORE-2072 - Update to Node 18.15.0 +- CORE-2090 - Upgrade testing from v4.0.x and v3.x to v4.1. 
+- CORE-2091 - Run the performance tests +- CORE-2092 - Allow for automatic patch version updates of certain packages +- CORE-2109 - Add verify option to clustering TLS configuration +- CORE-2111 - Update AWS SDK to v3 diff --git a/docs/technical-details/release-notes/4.tucker/4.1.1.md b/docs/technical-details/release-notes/4.tucker/4.1.1.md index bf20776d..2da797d1 100644 --- a/docs/technical-details/release-notes/4.tucker/4.1.1.md +++ b/docs/technical-details/release-notes/4.tucker/4.1.1.md @@ -2,9 +2,9 @@ 06/16/2023 -* HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration.md). -* An option has been added for [disabling the republishing NATS messages](../../../deployments/configuration.md), which can provide improved replication performance in a fully connected network. -* Improvements to our OpenShift container. -* Dependency security updates. -* **Bug Fixes** -* Fixed a bug in reporting database metrics in the `system_information` operation. +- HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on Node.js 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory-constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration.md). +- An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration.md), which can provide improved replication performance in a fully connected network. +- Improvements to our OpenShift container. +- Dependency security updates. +- **Bug Fixes** +- Fixed a bug in reporting database metrics in the `system_information` operation. diff --git a/docs/technical-details/release-notes/4.tucker/4.1.2.md b/docs/technical-details/release-notes/4.tucker/4.1.2.md index 91808710..aabb838d 100644 --- a/docs/technical-details/release-notes/4.tucker/4.1.2.md +++ b/docs/technical-details/release-notes/4.tucker/4.1.2.md @@ -1,8 +1,8 @@ ### HarperDB 4.1.2, Tucker Release -06/16/2023 -* HarperDB has updated binary dependencies to support older glibc versions back 2.17. -* A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`. -* Improvements to our OpenShift container. -* Dependency security updates. +06/16/2023 +- HarperDB has updated binary dependencies to support older glibc versions back to 2.17. +- A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`. +- Improvements to our OpenShift container. +- Dependency security updates. 
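Since the 4.1.0 notes above change the shape of `import_from_s3`/`export_to_s3` requests, here is a minimal hedged sketch of a post-4.1.0 request body. The `region` attribute and the bucket/key split come from the notes themselves; the remaining fields (`action`, `schema`, `table`, the `s3` object and its credential keys) follow common operations API conventions and should be verified against the current operations API docs:

```json
{
  "operation": "import_from_s3",
  "action": "insert",
  "schema": "dev",
  "table": "dog",
  "s3": {
    "aws_access_key_id": "YOUR_KEY_ID",
    "aws_secret_access_key": "YOUR_SECRET",
    "bucket": "my-bucket",
    "key": "imports/2023/dogs.csv",
    "region": "us-east-1"
  }
}
```

Note that `bucket` carries no trailing slash; the path lives entirely in `key`. Likewise, the 4.1.0 process-model change boils down to the following CLI behavior (a sketch using only commands named in these notes):

```sh
harperdb          # run in the foreground as a standard process (systemd-friendly)
harperdb start    # launch in a separate background process
harperdb stop     # stop the instance
harperdb status   # 4.1.2+: report running state and cluster status
```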
diff --git a/docs/technical-details/release-notes/4.tucker/4.2.0.md b/docs/technical-details/release-notes/4.tucker/4.2.0.md index a6185d2a..9735f223 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.0.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.0.md @@ -60,24 +60,24 @@ There have been significant changes to `harperdb-config.yaml`, however none of t `harperdb-config.yaml` has had some configuration values added, removed, renamed and defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration.md) for the most current configuration parameters. -* The `http` element has been expanded. - * `compressionThreshold` was added. - * All `customFunction` configuration now lives here, except for the `tls` section. -* `threads` has moved out of the `http` element and now is its own top level element. -* `authentication` section was moved out of the `operationsApi` section and is now its own top level element/section. -* `analytics.aggregatePeriod` was added. -* Default logging level was changed to `warn`. -* Default clustering log level was changed to `info`. -* `clustering.republishMessages` now defaults to `false`. -* `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`. -* Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section. -* Added a `securePort` parameter to `operationsApi` and `http` used for setting the https port. -* Added a new top level `tls` section. -* Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`. -* Added an element called `componentRoot` which replaces `customFunctions.root`. -* Updated custom pathing to use `databases` instead of `schemas`. -* Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events. -* A new `mqtt` section was added. +- The `http` element has been expanded. + - `compressionThreshold` was added. + - All `customFunction` configuration now lives here, except for the `tls` section. +- `threads` has moved out of the `http` element and now is its own top level element. +- `authentication` section was moved out of the `operationsApi` section and is now its own top level element/section. +- `analytics.aggregatePeriod` was added. +- Default logging level was changed to `warn`. +- Default clustering log level was changed to `info`. +- `clustering.republishMessages` now defaults to `false`. +- `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`. +- Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section. +- Added a `securePort` parameter to `operationsApi` and `http` used for setting the https port. +- Added a new top level `tls` section. +- Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`. +- Added an element called `componentRoot` which replaces `customFunctions.root`. +- Updated custom pathing to use `databases` instead of `schemas`. +- Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events. +- A new `mqtt` section was added. 
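Taken together, the 4.2.0 items above imply a `harperdb-config.yaml` shaped roughly like the following. This is a hedged sketch assembled only from the bullets above: all values are illustrative, keys left empty are placeholders, and the configuration reference remains the authoritative schema.

```yaml
# Illustrative post-4.2.0 layout (values are examples, not documented defaults)
http:
  securePort: 9927 # new parameter for the https port (value illustrative)
  compressionThreshold: 1024 # added in 4.2.0 (value illustrative)
threads: 4 # now its own top-level element (moved out of `http`)
authentication: # moved out of `operationsApi` to top level (settings omitted)
operationsApi: # now optional; anything not defined here defaults to `http`
tls: # new top-level section (certificate settings omitted)
componentRoot: ~/hdb/components # replaces `customFunctions.root` (path illustrative)
clustering:
  republishMessages: false # new default
analytics:
  aggregatePeriod: 60 # added (value illustrative)
logging:
  level: warn # new default logging level
  auditAuthEvents:
    logFailed: true # new switches for logging auth events
    logSuccessful: false
mqtt: # new section (settings omitted)
```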
### Socket Management diff --git a/docs/technical-details/release-notes/4.tucker/4.2.1.md b/docs/technical-details/release-notes/4.tucker/4.2.1.md index 48349d56..acc4f8cf 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.1.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.1.md @@ -1,8 +1,9 @@ ### HarperDB 4.2.1, Tucker Release + 11/3/2023 -* Downgrade NATS 2.10.3 back to 2.10.1 due to regression in connection handling. -* Handle package names with underscores. -* Improved validation of queries and comparators -* Avoid double replication on transactions with multiple commits -* Added file metadata on get_component_file +- Downgrade NATS 2.10.3 back to 2.10.1 due to regression in connection handling. +- Handle package names with underscores. +- Improved validation of queries and comparators +- Avoid double replication on transactions with multiple commits +- Added file metadata on get_component_file diff --git a/docs/technical-details/release-notes/4.tucker/4.2.2.md b/docs/technical-details/release-notes/4.tucker/4.2.2.md index 14a9196c..fa9b8e36 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.2.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.2.md @@ -1,10 +1,11 @@ ### HarperDB 4.2.2, Tucker Release + 11/8/2023 -* Increase timeouts for NATS connections. -* Fix for database snapshots for backups (and for clone node). -* Fix application of permissions for default tables exposed through REST. -* Log replication failures with record information. -* Fix application of authorization/permissions for MQTT commands. -* Fix copying of local components in clone node. -* Fix calculation of overlapping start time in clone node. \ No newline at end of file +- Increase timeouts for NATS connections. +- Fix for database snapshots for backups (and for clone node). +- Fix application of permissions for default tables exposed through REST. +- Log replication failures with record information. +- Fix application of authorization/permissions for MQTT commands. +- Fix copying of local components in clone node. +- Fix calculation of overlapping start time in clone node. 
diff --git a/docs/technical-details/release-notes/4.tucker/4.2.3.md b/docs/technical-details/release-notes/4.tucker/4.2.3.md index 09712bd6..b064310b 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.3.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.3.md @@ -1,8 +1,9 @@ ### HarperDB 4.2.3, Tucker Release + 11/15/2023 -* When setting setting securePort, disable unsecure port setting on same port -* Fix `harperdb status` when pid file is missing -* Fix/include missing icons/fonts from local studio -* Fix crash that can occur when concurrently accessing records > 16KB -* Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated \ No newline at end of file +- When setting securePort, disable the insecure port setting on the same port +- Fix `harperdb status` when pid file is missing +- Fix/include missing icons/fonts from local studio +- Fix crash that can occur when concurrently accessing records > 16KB +- Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated diff --git a/docs/technical-details/release-notes/4.tucker/4.2.4.md b/docs/technical-details/release-notes/4.tucker/4.2.4.md index f851f0a8..ae3fa18d 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.4.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.4.md @@ -1,5 +1,6 @@ ### HarperDB 4.2.4, Tucker Release + 11/16/2023 -* Prevent coercion of strings to numbers in SQL queries (in WHERE clause) -* Address fastify deprecation warning about accessing config \ No newline at end of file +- Prevent coercion of strings to numbers in SQL queries (in WHERE clause) +- Address fastify deprecation warning about accessing config diff --git a/docs/technical-details/release-notes/4.tucker/4.2.5.md b/docs/technical-details/release-notes/4.tucker/4.2.5.md index d5a3a0d4..603b5a97 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.5.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.5.md @@ -1,7 +1,8 @@ ### HarperDB 4.2.5, Tucker Release + 11/22/2023 -* Disable compression on server-sent events to ensure messages are immediately sent (not queued for later deliver) -* Update geoNear function to tolerate null values -* lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved -* Add header to indicate start of a new authenticated session (for studio to identify authenticated sessions) +- Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery) +- Update the geoNear function to tolerate null values +- lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved +- Add header to indicate start of a new authenticated session (for studio to identify authenticated sessions) diff --git a/docs/technical-details/release-notes/4.tucker/4.2.6.md b/docs/technical-details/release-notes/4.tucker/4.2.6.md index 2d289d0f..fabbe679 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.6.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.6.md @@ -1,5 +1,6 @@ ### HarperDB 4.2.6, Tucker Release + 11/29/2023 -* Update various geo SQL functions to tolerate invalid values -* Properly report component installation/load errors in `get_components` (for studio to load components after an installation failure) \ No newline at end of file +- Update various geo SQL functions to tolerate invalid values +- Properly report component installation/load errors in `get_components` (for studio to load components after an installation failure) diff --git 
a/docs/technical-details/release-notes/4.tucker/4.2.7.md b/docs/technical-details/release-notes/4.tucker/4.2.7.md index 4721402e..58d0069f 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.7.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.7.md @@ -1,6 +1,7 @@ ### HarperDB 4.2.7 + 12/6/2023 -* Add support for cloning over the top of an existing HarperDB instance -* Add health checks for NATS consumer with ability to restart consumer loops for better resiliency -* Revert Fastify autoload module due to a regression that had caused EcmaScript modules for Fastify route modules to fail to load on Windows \ No newline at end of file +- Add support for cloning over the top of an existing HarperDB instance +- Add health checks for NATS consumer with ability to restart consumer loops for better resiliency +- Revert Fastify autoload module due to a regression that had caused ECMAScript modules for Fastify route modules to fail to load on Windows diff --git a/docs/technical-details/release-notes/4.tucker/4.2.8.md b/docs/technical-details/release-notes/4.tucker/4.2.8.md index d931a396..5e88ed45 100644 --- a/docs/technical-details/release-notes/4.tucker/4.2.8.md +++ b/docs/technical-details/release-notes/4.tucker/4.2.8.md @@ -1,9 +1,10 @@ ### HarperDB 4.2.8 + 12/19/2023 -* Added support CLI command line arguments for clone node -* Added support for cloning a node without enabling clustering -* Clear NATS client cache on closed event -* Fix check for attribute permissions so that an empty attribute permissions array is treated as a table level permission definition -* Improve speed of cross-node health checks -* Fix for using `database` in describe operations +- Added support for CLI command-line arguments for clone node +- Added support for cloning a node without enabling clustering +- Clear NATS client cache on closed event +- Fix check for attribute permissions so that an empty attribute permissions array is treated as a table level permission definition +- Improve speed of cross-node health checks +- Fix for using `database` in describe operations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.0.md b/docs/technical-details/release-notes/4.tucker/4.3.0.md index 61a9b74f..affdbc0e 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.0.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.0.md @@ -15,7 +15,7 @@ type Product @table { # foreign key used to reference a brand brandId: ID @indexed # many-to-one relationship to brand - brand: Related @relation(from: "brandId") + brand: Related @relation(from: "brandId") } type Brand @table { id: ID @primaryKey diff --git a/docs/technical-details/release-notes/4.tucker/4.3.1.md b/docs/technical-details/release-notes/4.tucker/4.3.1.md index 274eaa98..53880170 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.1.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.1.md @@ -1,6 +1,7 @@ ### HarperDB 4.3.1 + 3/25/2024 -* Fix Fastify warning about responseTime usage -* Add access to the MQTT topic in the context -* Fix for ensuring local NATS streams are created +- Fix Fastify warning about responseTime usage +- Add access to the MQTT topic in the context +- Fix for ensuring local NATS streams are created diff --git a/docs/technical-details/release-notes/4.tucker/4.3.10.md b/docs/technical-details/release-notes/4.tucker/4.3.10.md index 440faeee..37a0dd4c 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.10.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.10.md @@ -1,7 +1,8 @@ 
### HarperDB 4.3.10 + 5/5/2024 -* Provide a `data` property on the request/context with deserialized data from the request body for any request including methods that don't typically have a request body -* Ensure that CRDTs are not double applied after committing a transaction -* Delete MQTT will after publishing even if it fails to publish -* Improve transaction retry logic to use async non-optimistic transactions after multiple retries \ No newline at end of file +- Provide a `data` property on the request/context with deserialized data from the request body for any request including methods that don't typically have a request body +- Ensure that CRDTs are not double applied after committing a transaction +- Delete MQTT will after publishing even if it fails to publish +- Improve transaction retry logic to use async non-optimistic transactions after multiple retries diff --git a/docs/technical-details/release-notes/4.tucker/4.3.11.md b/docs/technical-details/release-notes/4.tucker/4.3.11.md index a3051aaf..e3bd75cd 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.11.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.11.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.11 + 5/15/2024 -* Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS -* Fix warning in Node v22 \ No newline at end of file +- Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS +- Fix warning in Node v22 diff --git a/docs/technical-details/release-notes/4.tucker/4.3.12.md b/docs/technical-details/release-notes/4.tucker/4.3.12.md index ad8fd25e..7732ef38 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.12.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.12.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.12 + 5/16/2024 -* Fix for handling ciphers in multiple certificates -* Allow each certificate config to have multiple hostnames \ No newline at end of file +- Fix for handling ciphers in multiple certificates +- Allow each certificate config to have multiple hostnames diff --git a/docs/technical-details/release-notes/4.tucker/4.3.13.md b/docs/technical-details/release-notes/4.tucker/4.3.13.md index 17b51924..798aa0e0 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.13.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.13.md @@ -1,6 +1,7 @@ ### HarperDB 4.3.13 + 5/22/2024 -* Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available -* Fix for memory leak when a node is down and consumers are trying to reconnect -* Faster cross-thread notification mechanism for transaction events \ No newline at end of file +- Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available +- Fix for memory leak when a node is down and consumers are trying to reconnect +- Faster cross-thread notification mechanism for transaction events diff --git a/docs/technical-details/release-notes/4.tucker/4.3.14.md b/docs/technical-details/release-notes/4.tucker/4.3.14.md index e9941557..00d0f8e7 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.14.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.14.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.14 + 5/24/2024 -* Fix application of ciphers to multi-certificate TLS configuration \ No newline at end of file +- Fix application of ciphers to multi-certificate TLS configuration diff --git a/docs/technical-details/release-notes/4.tucker/4.3.15.md b/docs/technical-details/release-notes/4.tucker/4.3.15.md index 
ea1a5c25..f845b44d 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.15.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.15.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.15 + 5/29/2024 -* Add support for wildcards in hostnames for SNI -* Properly apply ciphers settings on multiple TLS configurations \ No newline at end of file +- Add support for wildcards in hostnames for SNI +- Properly apply ciphers settings on multiple TLS configurations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.16.md b/docs/technical-details/release-notes/4.tucker/4.3.16.md index b39588ed..43e61103 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.16.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.16.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.16 + 6/3/2024 -* Properly shim legacy TLS configuration with new multi-certificate support -* Show the changed filenames when an application is reloaded \ No newline at end of file +- Properly shim legacy TLS configuration with new multi-certificate support +- Show the changed filenames when an application is reloaded diff --git a/docs/technical-details/release-notes/4.tucker/4.3.17.md b/docs/technical-details/release-notes/4.tucker/4.3.17.md index 5e5fcf33..89112db4 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.17.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.17.md @@ -1,9 +1,10 @@ ### HarperDB 4.3.17 + 6/13/2024 -* Add MQTT analytics of incoming messages and separate by QoS level -* Ensure that any installed `harperdb` package in components is relinked to running harperdb. -* Upgrade storage to more efficiently avoid storage increases -* Fix to improve database metrics in system_information -* Fix for pathing on Windows with extension modules -* Add ability to define a range of listening threads \ No newline at end of file +- Add MQTT analytics of incoming messages and separate by QoS level +- Ensure that any installed `harperdb` package in components is relinked to running harperdb. +- Upgrade storage to more efficiently avoid storage increases +- Fix to improve database metrics in system_information +- Fix for pathing on Windows with extension modules +- Add ability to define a range of listening threads diff --git a/docs/technical-details/release-notes/4.tucker/4.3.18.md b/docs/technical-details/release-notes/4.tucker/4.3.18.md index d711a934..e9673f04 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.18.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.18.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.18 + 6/18/2024 -* Immediately terminate an MQTT connection when there is a keep-alive timeout. \ No newline at end of file +- Immediately terminate an MQTT connection when there is a keep-alive timeout. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.19.md b/docs/technical-details/release-notes/4.tucker/4.3.19.md index 9e67ef1d..8d493c28 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.19.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.19.md @@ -1,6 +1,7 @@ ### HarperDB 4.3.19 + 7/2/2024 -* Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized. -* Ensure that deploy components empty the target directory for a clean installation and expansion of a `package` sub-directory. -* Ensure that we do not double load components that are referenced by symlink from node_modules and in components directory. 
\ No newline at end of file +- Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized. +- Ensure that deploy components empty the target directory for a clean installation and expansion of a `package` sub-directory. +- Ensure that we do not double load components that are referenced by symlink from node_modules and in components directory. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.2.md b/docs/technical-details/release-notes/4.tucker/4.3.2.md index 4b0cc29d..00cad16a 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.2.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.2.md @@ -1,10 +1,11 @@ ### HarperDB 4.3.2 + 3/29/2024 -* Clone node updates to individually clone missing parts -* Fixes for publishing OpenShift container -* Increase purge stream timeout -* Fixed declaration of analytics schema so queries work before a restart -* Fix for iterating queries when deleted records exist -* LMDB stability upgrade -* Fix for cleanup of last will in MQTT \ No newline at end of file +- Clone node updates to individually clone missing parts +- Fixes for publishing OpenShift container +- Increase purge stream timeout +- Fixed declaration of analytics schema so queries work before a restart +- Fix for iterating queries when deleted records exist +- LMDB stability upgrade +- Fix for cleanup of last will in MQTT diff --git a/docs/technical-details/release-notes/4.tucker/4.3.20.md b/docs/technical-details/release-notes/4.tucker/4.3.20.md index a7546fa9..e0132480 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.20.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.20.md @@ -1,12 +1,13 @@ ### HarperDB 4.3.20 + 7/11/2024 -* The restart_service operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) -* Disable Nagle's algorithm for TCP connections to improve performance -* Append Server-Timing header if a fastify route has already added one -* Avoid symlinking the harperdb directory to itself -* Fix for deleting an empty database -* Upgrade ws and pm2 packages for security vulnerabilities -* Improved TypeScript definitions for Resource and Context. -* The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from source -* Better error reporting of MQTT parsing errors and termination of connections for compliance +- The restart_service operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) +- Disable Nagle's algorithm for TCP connections to improve performance +- Append Server-Timing header if a fastify route has already added one +- Avoid symlinking the harperdb directory to itself +- Fix for deleting an empty database +- Upgrade ws and pm2 packages for security vulnerabilities +- Improved TypeScript definitions for Resource and Context. 
+- The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from source +- Better error reporting of MQTT parsing errors and termination of connections for compliance diff --git a/docs/technical-details/release-notes/4.tucker/4.3.21.md b/docs/technical-details/release-notes/4.tucker/4.3.21.md index 734554ee..37bbf2bd 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.21.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.21.md @@ -1,8 +1,9 @@ ### HarperDB 4.3.21 + 8/21/2024 -* Fixed an issue with iterating/serializing query results with a `limit`. -* Fixed an issue that was preventing the caching of structured records in memory. -* Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context`. -* Fixed logging warnings about license limits after a license is updated. -* Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field. \ No newline at end of file +- Fixed an issue with iterating/serializing query results with a `limit`. +- Fixed an issue that was preventing the caching of structured records in memory. +- Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context`. +- Fixed logging warnings about license limits after a license is updated. +- Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.22.md b/docs/technical-details/release-notes/4.tucker/4.3.22.md index 0b3918f5..04cd71ac 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.22.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.22.md @@ -1,9 +1,10 @@ ### HarperDB 4.3.22 + 9/6/2024 -* Adding improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions -* Allow .extension in URL paths to indicate both preferred encoding and decoding -* Added support for multi-part ids in query parameters -* Limit describe calls by time before using statistical sampling -* Proper cleanup of a transaction when it is aborted due to running out of available read transactions -* Updates to release/builds \ No newline at end of file +- Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions +- Allow a `.extension` in URL paths to indicate both preferred encoding and decoding +- Added support for multi-part ids in query parameters +- Limit describe calls by time before using statistical sampling +- Proper cleanup of a transaction when it is aborted due to running out of available read transactions +- Updates to release/builds diff --git a/docs/technical-details/release-notes/4.tucker/4.3.23.md b/docs/technical-details/release-notes/4.tucker/4.3.23.md index 0cdc22e0..43fab00d 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.23.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.23.md @@ -1,6 +1,7 @@ ### HarperDB 4.3.23 + 9/12/2024 -* Avoid long-running read transactions on subscription catch-ups -* Reverted change to setting default certificate for IP address only -* Better handling of last-will messages on startup \ No newline at end of file +- Avoid long-running read transactions on subscription catch-ups +- Reverted change to setting default certificate for IP address only +- Better handling of last-will messages on startup diff --git 
a/docs/technical-details/release-notes/4.tucker/4.3.24.md b/docs/technical-details/release-notes/4.tucker/4.3.24.md index 398cc15f..a953af6b 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.24.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.24.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.24 + 9/12/2024 -* Fix for querying for large strings (over 255 characters) \ No newline at end of file +- Fix for querying for large strings (over 255 characters) diff --git a/docs/technical-details/release-notes/4.tucker/4.3.25.md b/docs/technical-details/release-notes/4.tucker/4.3.25.md index 29023130..dee3f9a7 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.25.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.25.md @@ -1,7 +1,8 @@ ### HarperDB 4.3.25 + 9/24/2024 -* Add analytics for replication latency -* Fix iteration issue over asynchronous joined queries -* Local studio fix for loading applications in insecure context (HTTP) -* Local studio fix for loading configuration tab \ No newline at end of file +- Add analytics for replication latency +- Fix iteration issue over asynchronous joined queries +- Local studio fix for loading applications in insecure context (HTTP) +- Local studio fix for loading configuration tab diff --git a/docs/technical-details/release-notes/4.tucker/4.3.26.md b/docs/technical-details/release-notes/4.tucker/4.3.26.md index 5ee177d2..14eaba4f 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.26.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.26.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.26 + 9/27/2024 -* Fixed a security issue that allowed users to bypass access controls with the operations API -* Previously expiration handling was limited to tables with a source, but now it can be applied to any table \ No newline at end of file +- Fixed a security issue that allowed users to bypass access controls with the operations API +- Previously expiration handling was limited to tables with a source, but now it can be applied to any table diff --git a/docs/technical-details/release-notes/4.tucker/4.3.27.md b/docs/technical-details/release-notes/4.tucker/4.3.27.md index dc2c0a4f..ddec8731 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.27.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.27.md @@ -1,8 +1,9 @@ ### HarperDB 4.3.27 + 10/2/2024 -* Fixed handling HTTP upgrade with Connection header that does not use Upgrade as the sole value (for Firefox) -* Added metrics for requests by status code -* Properly remove attributes from the stored metadata when removed from GraphQL schema -* Fixed a regression in clustering retrieval of schema description -* Fix attribute validation/handling to ensure that sequential ids can be assigned with insert/upsert operations \ No newline at end of file +- Fixed handling HTTP upgrade with Connection header that does not use Upgrade as the sole value (for Firefox) +- Added metrics for requests by status code +- Properly remove attributes from the stored metadata when removed from GraphQL schema +- Fixed a regression in clustering retrieval of schema description +- Fix attribute validation/handling to ensure that sequential ids can be assigned with insert/upsert operations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.28.md b/docs/technical-details/release-notes/4.tucker/4.3.28.md index 067860d0..51319670 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.28.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.28.md @@ -1,6 +1,7 @@ ### 
HarperDB 4.3.28 + 10/3/2024 -* Tolerate user with no role when building NATS config -* Change metrics for requests by status code to be prefixed with "response_" -* Log error `cause`, and other properties, when available. +- Tolerate user with no role when building NATS config +- Change metrics for requests by status code to be prefixed with "response\_" +- Log error `cause`, and other properties, when available. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.29.md b/docs/technical-details/release-notes/4.tucker/4.3.29.md index 474015b9..2130c555 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.29.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.29.md @@ -1,11 +1,12 @@ ### HarperDB 4.3.29 + 10/7/2024 -* Avoid unnecessary cookie session creation without explicit login -* Added support for caching directives in operations API -* Fixed issue with creating metadata for table with no primary key -* Local studio upgrade: - * Added support for "cache only" mode to view table data without origin resolution - * Added partial support for cookie-based authentication - * Added support for browsing tables with no primary key - * Improved performance for sorting tables +- Avoid unnecessary cookie session creation without explicit login +- Added support for caching directives in operations API +- Fixed issue with creating metadata for table with no primary key +- Local studio upgrade: + - Added support for "cache only" mode to view table data without origin resolution + - Added partial support for cookie-based authentication + - Added support for browsing tables with no primary key + - Improved performance for sorting tables diff --git a/docs/technical-details/release-notes/4.tucker/4.3.3.md b/docs/technical-details/release-notes/4.tucker/4.3.3.md index 1fa870d9..656ef8be 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.3.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.3.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.3 + 4/01/2024 -* Improve MQTT logging by properly logging auth failures, logging disconnections +- Improve MQTT logging by properly logging auth failures, logging disconnections diff --git a/docs/technical-details/release-notes/4.tucker/4.3.30.md b/docs/technical-details/release-notes/4.tucker/4.3.30.md index f9d2a97a..e374c19d 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.30.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.30.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.30 + 10/9/2024 -* Properly assign transaction timestamp to writes from cache resolutions (ensuring that latencies can be calculated on replicating nodes) +- Properly assign transaction timestamp to writes from cache resolutions (ensuring that latencies can be calculated on replicating nodes) diff --git a/docs/technical-details/release-notes/4.tucker/4.3.31.md b/docs/technical-details/release-notes/4.tucker/4.3.31.md index 755b39b0..f1c66323 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.31.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.31.md @@ -1,6 +1,7 @@ ### HarperDB 4.3.31 + 10/10/2024 -* Reset the restart limit for manual restarts to ensure that NATS process will continue to restart after more than 10 manual restarts -* Only apply caching directives (from headers) to tables/resources that are configured to be caching, sourced from another resource -* Catch/tolerate errors on serializing objects for logging +- Reset the restart limit for manual restarts to ensure that NATS process will continue to restart after more than 
10 manual restarts +- Only apply caching directives (from headers) to tables/resources that are configured to be caching, sourced from another resource +- Catch/tolerate errors on serializing objects for logging diff --git a/docs/technical-details/release-notes/4.tucker/4.3.32.md b/docs/technical-details/release-notes/4.tucker/4.3.32.md index 0ab3d32f..558d906a 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.32.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.32.md @@ -1,6 +1,7 @@ ### HarperDB 4.3.32 + 10/16/2024 -* Fix a memory leak when cluster_network closes a hub connection -* Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic -* Record analytics and server-timing header even when cache resolution fails +- Fix a memory leak when cluster_network closes a hub connection +- Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic +- Record analytics and server-timing header even when cache resolution fails diff --git a/docs/technical-details/release-notes/4.tucker/4.3.33.md b/docs/technical-details/release-notes/4.tucker/4.3.33.md index 078ee703..1d934f0e 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.33.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.33.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.33 + 10/24/2024 -* Change the default maximum length for a fastify route parameter from 100 to 1000 characters. +- Change the default maximum length for a fastify route parameter from 100 to 1000 characters. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.34.md b/docs/technical-details/release-notes/4.tucker/4.3.34.md index 576f4e64..c3ca47ec 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.34.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.34.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.34 + 10/24/2024 -* lmdb-js upgrade +- lmdb-js upgrade diff --git a/docs/technical-details/release-notes/4.tucker/4.3.35.md b/docs/technical-details/release-notes/4.tucker/4.3.35.md index b304c116..1f8c2073 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.35.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.35.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.35 + 11/12/2024 -* Upgrades for supporting Node.js V23 -* Fix for handling a change in the schema for nested data structures +- Upgrades for supporting Node.js V23 +- Fix for handling a change in the schema for nested data structures diff --git a/docs/technical-details/release-notes/4.tucker/4.3.36.md b/docs/technical-details/release-notes/4.tucker/4.3.36.md index c2bdf9cf..40e8b726 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.36.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.36.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.36 + 11/14/2024 -* lmdb-js upgrade for better free-space management +- lmdb-js upgrade for better free-space management diff --git a/docs/technical-details/release-notes/4.tucker/4.3.37.md b/docs/technical-details/release-notes/4.tucker/4.3.37.md index d87d4a3c..8f067b9c 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.37.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.37.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.37 + 12/6/2024 -* lmdb-js upgrade for preventing crashes with shared user buffers +- lmdb-js upgrade for preventing crashes with shared user buffers diff --git a/docs/technical-details/release-notes/4.tucker/4.3.38.md 
b/docs/technical-details/release-notes/4.tucker/4.3.38.md index 1e9f82ef..1dde2665 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.38.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.38.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.38 + 1/10/2025 -* Fixes for audit log cleanup +- Fixes for audit log cleanup diff --git a/docs/technical-details/release-notes/4.tucker/4.3.4.md b/docs/technical-details/release-notes/4.tucker/4.3.4.md index 4acaaec8..ee9909ad 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.4.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.4.md @@ -1,5 +1,6 @@ ### HarperDB 4.3.4 + 4/9/2024 -* Fixed a buffer overrun issue with decompressing compressed data -* Better keep-alive of transactions with long running queries \ No newline at end of file +- Fixed a buffer overrun issue with decompressing compressed data +- Better keep-alive of transactions with long-running queries diff --git a/docs/technical-details/release-notes/4.tucker/4.3.5.md b/docs/technical-details/release-notes/4.tucker/4.3.5.md index 9bc127ad..04e51594 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.5.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.5.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.5 + 4/10/2024 -* Fixed a buffer overrun issue with decompressing compressed data \ No newline at end of file +- Fixed a buffer overrun issue with decompressing compressed data diff --git a/docs/technical-details/release-notes/4.tucker/4.3.6.md b/docs/technical-details/release-notes/4.tucker/4.3.6.md index 1de9c508..704640e5 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.6.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.6.md @@ -1,8 +1,9 @@ ### HarperDB 4.3.6 + 4/12/2024 -* Fixed parsing of dates from epoch millisecond times in queries -* Fixed CRDT incrementation of different data types -* Adjustments to text/plain content type q-value handling -* Fixed parsing of passwords with a colon -* Added MQTT events for connections, authorization, and disconnections \ No newline at end of file +- Fixed parsing of dates from epoch millisecond times in queries +- Fixed CRDT incrementation of different data types +- Adjustments to text/plain content type q-value handling +- Fixed parsing of passwords with a colon +- Added MQTT events for connections, authorization, and disconnections diff --git a/docs/technical-details/release-notes/4.tucker/4.3.7.md b/docs/technical-details/release-notes/4.tucker/4.3.7.md index 34e339d2..878ef822 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.7.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.7.md @@ -1,8 +1,9 @@ ### HarperDB 4.3.7 + 4/16/2024 -* Fixed transaction handling to stay on open on long compaction operations -* Fixed handling of sorting on non-indexed attributes -* Storage stability improvements -* Fixed authentication/authorization of WebSockets connection and use of cookies -* Fixes for clone node operations \ No newline at end of file +- Fixed transaction handling to stay open during long compaction operations +- Fixed handling of sorting on non-indexed attributes +- Storage stability improvements +- Fixed authentication/authorization of WebSockets connection and use of cookies +- Fixes for clone node operations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.8.md b/docs/technical-details/release-notes/4.tucker/4.3.8.md index 9f6d2df0..2f858e6f 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.8.md +++ 
b/docs/technical-details/release-notes/4.tucker/4.3.8.md @@ -1,8 +1,9 @@ ### HarperDB 4.3.8 + 4/26/2024 -* Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within keep-alive window) -* Improved handling of write queue timeouts, with configurability -* Fixed a memory leak that can occur with NATS reconnections after heartbeat misses -* Fixed a bug in clone node with a null port -* Add error events to MQTT events system \ No newline at end of file +- Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within keep-alive window) +- Improved handling of write queue timeouts, with configurability +- Fixed a memory leak that can occur with NATS reconnections after heartbeat misses +- Fixed a bug in clone node with a null port +- Add error events to MQTT events system diff --git a/docs/technical-details/release-notes/4.tucker/4.3.9.md b/docs/technical-details/release-notes/4.tucker/4.3.9.md index d35a2fcf..b693c746 100644 --- a/docs/technical-details/release-notes/4.tucker/4.3.9.md +++ b/docs/technical-details/release-notes/4.tucker/4.3.9.md @@ -1,4 +1,5 @@ ### HarperDB 4.3.9 + 4/30/2024 -* lmdb-js upgrade \ No newline at end of file +- lmdb-js upgrade diff --git a/docs/technical-details/release-notes/4.tucker/4.4.1.md b/docs/technical-details/release-notes/4.tucker/4.4.1.md index 49c244da..0963bc93 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.1.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.1.md @@ -1,7 +1,8 @@ ### HarperDB 4.4.1 + 10/17/2024 -* Fix issue where non-RSA keys were not being parsed correctly on startup. -* Fix a memory leak when cluster_network closes a hub connection -* Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic -* Record analytics and server-timing header even when cache resolution fails \ No newline at end of file +- Fix issue where non-RSA keys were not being parsed correctly on startup. 
+- Fix a memory leak when cluster_network closes a hub connection +- Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic +- Record analytics and server-timing header even when cache resolution fails diff --git a/docs/technical-details/release-notes/4.tucker/4.4.10.md b/docs/technical-details/release-notes/4.tucker/4.4.10.md index 768a19b6..9767c6e7 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.10.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.10.md @@ -1,4 +1,5 @@ ### HarperDB 4.4.10 + 12/17/2024 -* Fix for deploying packages and detecting node_modules directory \ No newline at end of file +- Fix for deploying packages and detecting node_modules directory diff --git a/docs/technical-details/release-notes/4.tucker/4.4.11.md b/docs/technical-details/release-notes/4.tucker/4.4.11.md index 91750012..8eb248f9 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.11.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.11.md @@ -1,5 +1,6 @@ ### HarperDB 4.4.11 + 12/18/2024 -* Fix for initial certification creation on upgrade -* Docker build fix \ No newline at end of file +- Fix for initial certificate creation on upgrade +- Docker build fix diff --git a/docs/technical-details/release-notes/4.tucker/4.4.12.md b/docs/technical-details/release-notes/4.tucker/4.4.12.md index a636d899..1b1b4e31 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.12.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.12.md @@ -1,5 +1,6 @@ ### HarperDB 4.4.12 + 12/19/2024 -* Move components installed by reference into hdb/components for consistency and compatibility with next.js -* Use npm install --force to ensure modules are installed \ No newline at end of file +- Move components installed by reference into hdb/components for consistency and compatibility with Next.js +- Use `npm install --force` to ensure modules are installed diff --git a/docs/technical-details/release-notes/4.tucker/4.4.13.md b/docs/technical-details/release-notes/4.tucker/4.4.13.md index a3fcb53a..2e4427ea 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.13.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.13.md @@ -1,10 +1,11 @@ ### HarperDB 4.4.13 + 1/2/2025 -* Fix for not using requestCert if the port doesn't need replication -* Fix for applying timeouts HTTP server for ancient node versions -* Updates for different replication configuration settings, including sharding and replication using stored credentials -* Mitigation crashing due GC'ed shared array buffers -* Fix for error handling with CLI failures -* Updated dependencies -* Fix for allow securePort to be set on authentication \ No newline at end of file +- Fix for not using requestCert if the port doesn't need replication +- Fix for applying timeouts to the HTTP server for ancient Node versions +- Updates for different replication configuration settings, including sharding and replication using stored credentials +- Mitigation for crashing due to GC'ed shared array buffers +- Fix for error handling with CLI failures +- Updated dependencies +- Fix to allow securePort to be set on authentication diff --git a/docs/technical-details/release-notes/4.tucker/4.4.14.md b/docs/technical-details/release-notes/4.tucker/4.4.14.md index db7acddb..0c649e13 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.14.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.14.md @@ -1,7 +1,8 @@ ### HarperDB 4.4.14 + 1/3/2025 -* Fix 
for starting HTTP server if headersTimeout is omitted in the configuration -* Fix for avoiding ping timeouts for large/long-duration WS messages between nodes -* Don't report errors for component that only uses a directory -* Add flag for disabling WebSocket on REST component \ No newline at end of file +- Fix for starting the HTTP server if headersTimeout is omitted in the configuration +- Fix for avoiding ping timeouts for large/long-duration WS messages between nodes +- Don't report errors for a component that only uses a directory +- Add flag for disabling WebSocket on the REST component diff --git a/docs/technical-details/release-notes/4.tucker/4.4.15.md b/docs/technical-details/release-notes/4.tucker/4.4.15.md index 898ca322..8f9c0757 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.15.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.15.md @@ -1,6 +1,7 @@ ### HarperDB 4.4.15 + 1/8/2025 -* Fix for manage the state of replication sequences for node -* Fix for better concurrency with ongoing replication -* Fix for accessing audit log entries \ No newline at end of file +- Fix for managing the state of replication sequences for a node +- Fix for better concurrency with ongoing replication +- Fix for accessing audit log entries diff --git a/docs/technical-details/release-notes/4.tucker/4.4.16.md b/docs/technical-details/release-notes/4.tucker/4.4.16.md index fc405334..3becc679 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.16.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.16.md @@ -1,10 +1,11 @@ ### HarperDB 4.4.16 + 1/22/2025 -* Fix for cleaning up old audit entries and associated deletion entries -* Allow CLI operations to be run when cloning is enabled -* Report table size in describe operations -* Fix for cleaning up symlinks when dropping components -* Fix for enumerating components when symlinks are used -* Add an option for using a specific installation command with deploys -* Add an API for registering an HTTP upgrade listener with `server.upgrade` \ No newline at end of file +- Fix for cleaning up old audit entries and associated deletion entries +- Allow CLI operations to be run when cloning is enabled +- Report table size in describe operations +- Fix for cleaning up symlinks when dropping components +- Fix for enumerating components when symlinks are used +- Add an option for using a specific installation command with deploys +- Add an API for registering an HTTP upgrade listener with `server.upgrade` (see the sketch after the 4.4.2 notes below) diff --git a/docs/technical-details/release-notes/4.tucker/4.4.17.md b/docs/technical-details/release-notes/4.tucker/4.4.17.md index 0c60240c..dce496cf 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.17.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.17.md @@ -1,7 +1,8 @@ ### HarperDB 4.4.17 + 1/29/2025 -* Provide statistics on the size of the audit log store -* Fix handling of symlinks to HarperDB package that to avoid NPM's errors in restricted containers -* Add option for rolling/consecutive restarts for deployments -* Fix for enabling root CAs for replication authorization \ No newline at end of file +- Provide statistics on the size of the audit log store +- Fix handling of symlinks to the HarperDB package to avoid NPM errors in restricted containers +- Add option for rolling/consecutive restarts for deployments +- Fix for enabling root CAs for replication authorization diff --git a/docs/technical-details/release-notes/4.tucker/4.4.18.md b/docs/technical-details/release-notes/4.tucker/4.4.18.md index
f8b276fc..c2836edd 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.18.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.18.md @@ -1,6 +1,7 @@ ### HarperDB 4.4.18 + 1/29/2025 -* Add option for disabling full table copy in replication -* Add option for startTime in route configuration -* Add/fix option to deploy with package from CLI \ No newline at end of file +- Add option for disabling full table copy in replication +- Add option for startTime in route configuration +- Add/fix option to deploy with package from CLI diff --git a/docs/technical-details/release-notes/4.tucker/4.4.19.md b/docs/technical-details/release-notes/4.tucker/4.4.19.md index 69cf5184..ae882c74 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.19.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.19.md @@ -1,7 +1,8 @@ ### HarperDB 4.4.19 + 2/4/2025 -* LMDB upgrade for free-list verification on commit -* Add check to avoid compacting database multiple times with compactOnStart -* Fix handling of denied/absent subscription -* Add support for including symlinked directories in packaging a deployed component \ No newline at end of file +- LMDB upgrade for free-list verification on commit +- Add check to avoid compacting database multiple times with compactOnStart +- Fix handling of denied/absent subscription +- Add support for including symlinked directories in packaging a deployed component diff --git a/docs/technical-details/release-notes/4.tucker/4.4.2.md b/docs/technical-details/release-notes/4.tucker/4.4.2.md index e933a6e3..9d013c46 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.2.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.2.md @@ -1,4 +1,5 @@ ### HarperDB 4.4.2 + 10/18/2024 -* Republish of 4.4.1 with Git merge correction. \ No newline at end of file +- Republish of 4.4.1 with Git merge correction. 
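A minimal sketch of how the `server.upgrade` listener from the 4.4.16 notes above might be used. The callback signature here is an assumption, modeled on Node.js's standard HTTP `'upgrade'` event (request, socket, head), and `my-protocol` is a made-up protocol name; consult the reference documentation for the actual contract.

```typescript
// Hypothetical sketch of registering an HTTP upgrade listener (4.4.16).
// Assumption: the callback mirrors Node.js's 'upgrade' event signature;
// the real API may differ.
import { server } from 'harperdb';
import type { IncomingMessage } from 'node:http';
import type { Duplex } from 'node:stream';

server.upgrade((request: IncomingMessage, socket: Duplex, head: Buffer) => {
	// Accept only upgrades for a protocol we recognize; close everything else
	if (request.headers.upgrade !== 'my-protocol') {
		socket.destroy();
		return;
	}
	socket.write('HTTP/1.1 101 Switching Protocols\r\nUpgrade: my-protocol\r\nConnection: Upgrade\r\n\r\n');
	// ...hand the socket off to the custom protocol handler here
});
```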
diff --git a/docs/technical-details/release-notes/4.tucker/4.4.20.md b/docs/technical-details/release-notes/4.tucker/4.4.20.md index 015534f2..62cb86b2 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.20.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.20.md @@ -1,4 +1,5 @@ ### HarperDB 4.4.20 + 2/11/2025 -* LMDB upgrade for improved handling of page boundaries with free-space lists +- LMDB upgrade for improved handling of page boundaries with free-space lists diff --git a/docs/technical-details/release-notes/4.tucker/4.4.21.md b/docs/technical-details/release-notes/4.tucker/4.4.21.md index 33e7d467..74012d81 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.21.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.21.md @@ -1,6 +1,7 @@ ### HarperDB 4.4.21 + 2/25/2025 -* Fix for saving audit log entries for large keys (> 1KB) -* Security fix for handling missing passwords -* Skip bin links for NPM installation to avoid access issues \ No newline at end of file +- Fix for saving audit log entries for large keys (> 1KB) +- Security fix for handling missing passwords +- Skip bin links for NPM installation to avoid access issues diff --git a/docs/technical-details/release-notes/4.tucker/4.4.22.md b/docs/technical-details/release-notes/4.tucker/4.4.22.md index 76fb8f73..a12b4747 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.22.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.22.md @@ -1,4 +1,5 @@ ### HarperDB 4.4.22 + 3/5/2025 -* Add new http configuration option `corsAccessControlAllowHeaders` \ No newline at end of file +- Add new http configuration option `corsAccessControlAllowHeaders` diff --git a/docs/technical-details/release-notes/4.tucker/4.4.23.md b/docs/technical-details/release-notes/4.tucker/4.4.23.md index 288a1003..2fd31927 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.23.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.23.md @@ -1,5 +1,6 @@ ### HarperDB 4.4.23 + 3/7/2025 -* Fix for subscriptions to children of segmented id -* Fix for better error reporting on NPM failures \ No newline at end of file +- Fix for subscriptions to children of segmented id +- Fix for better error reporting on NPM failures diff --git a/docs/technical-details/release-notes/4.tucker/4.4.24.md b/docs/technical-details/release-notes/4.tucker/4.4.24.md index 7cb4f477..fddf569d 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.24.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.24.md @@ -1,5 +1,6 @@ ### HarperDB 4.4.24 + 3/10/2025 -* Use process.exit(0) to restart when enabled by env var -* Reset the cwd on thread restart \ No newline at end of file +- Use process.exit(0) to restart when enabled by env var +- Reset the cwd on thread restart diff --git a/docs/technical-details/release-notes/4.tucker/4.4.3.md b/docs/technical-details/release-notes/4.tucker/4.4.3.md index 88d23970..91b221cc 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.3.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.3.md @@ -1,8 +1,9 @@ ### HarperDB 4.4.3 + 10/25/2024 -* Fix for notification of records through classes that override get for multi-tier caching -* Fix for CLI operations -* Support for longer route parameters in Fastify routes -* Fix for accessing `harperdb` package/module from user threads -* Improvements to clone node for cloning without credentials \ No newline at end of file +- Fix for notification of records through classes that override get for multi-tier caching +- Fix for CLI 
operations +- Support for longer route parameters in Fastify routes +- Fix for accessing `harperdb` package/module from user threads +- Improvements to clone node for cloning without credentials diff --git a/docs/technical-details/release-notes/4.tucker/4.4.4.md b/docs/technical-details/release-notes/4.tucker/4.4.4.md index a86d39f7..05cd5af8 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.4.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.4.md @@ -1,6 +1,7 @@ ### HarperDB 4.4.4 + 11/4/2024 -* Re-introduce declarative roles and permissions -* Fix for OpenAPI endpoint -* Fix for exports of `harperdb` package/module \ No newline at end of file +- Re-introduce declarative roles and permissions +- Fix for OpenAPI endpoint +- Fix for exports of `harperdb` package/module diff --git a/docs/technical-details/release-notes/4.tucker/4.4.5.md b/docs/technical-details/release-notes/4.tucker/4.4.5.md index 00a042c4..7652820c 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.5.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.5.md @@ -1,10 +1,11 @@ ### HarperDB 4.4.5 + 11/15/2024 -* Fix for DOS vulnerability in large headers with cache-control and replication headers -* Fix for handling a change in the schema type for sub-fields in a nested object -* Add support for content type handlers to return iterators -* Fix for session management with custom authentication handler -* Updates for Node.js V23 compatibility -* Fix for sorting on nested properties -* Fix for querying on not_equal to a null with object values \ No newline at end of file +- Fix for DoS vulnerability in large headers with cache-control and replication headers +- Fix for handling a change in the schema type for sub-fields in a nested object +- Add support for content type handlers to return iterators +- Fix for session management with custom authentication handler +- Updates for Node.js V23 compatibility +- Fix for sorting on nested properties +- Fix for querying on not_equal to a null with object values diff --git a/docs/technical-details/release-notes/4.tucker/4.4.6.md b/docs/technical-details/release-notes/4.tucker/4.4.6.md index 2030c18d..c7131f3b 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.6.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.6.md @@ -1,7 +1,8 @@ ### HarperDB 4.4.6 + 11/25/2024 -* Fix queries with only sorting applied -* Fix for handling invalidation events propagating through sources -* Expanded CLI support for deploying packages -* Support for deploying large packages \ No newline at end of file +- Fix queries with only sorting applied +- Fix for handling invalidation events propagating through sources +- Expanded CLI support for deploying packages +- Support for deploying large packages diff --git a/docs/technical-details/release-notes/4.tucker/4.4.7.md b/docs/technical-details/release-notes/4.tucker/4.4.7.md index 776acd0c..39579988 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.7.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.7.md @@ -1,5 +1,6 @@ ### HarperDB 4.4.7 + 11/27/2024 -* Allow for package to deploy own modules -* Fix for preventing double sourcing of resources \ No newline at end of file +- Allow a package to deploy its own modules +- Fix for preventing double sourcing of resources diff --git a/docs/technical-details/release-notes/4.tucker/4.4.8.md b/docs/technical-details/release-notes/4.tucker/4.4.8.md index ef49cfbf..67d6a4e9 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.8.md +++
b/docs/technical-details/release-notes/4.tucker/4.4.8.md @@ -1,4 +1,5 @@ ### HarperDB 4.4.8 + 12/2/2024 -* Add multiple node versions of published docker containers \ No newline at end of file +- Add multiple node versions of published docker containers diff --git a/docs/technical-details/release-notes/4.tucker/4.4.9.md b/docs/technical-details/release-notes/4.tucker/4.4.9.md index f941229a..fffb30c7 100644 --- a/docs/technical-details/release-notes/4.tucker/4.4.9.md +++ b/docs/technical-details/release-notes/4.tucker/4.4.9.md @@ -1,8 +1,9 @@ ### HarperDB 4.4.9 + 12/12/2024 -* Change enableRootCAs to default to true -* Fixes for install and clone commands -* Add rejectUnauthorized to the CLI options -* Fixes for cloning -* Install modules in own component when deploying package by payload \ No newline at end of file +- Change enableRootCAs to default to true +- Fixes for install and clone commands +- Add rejectUnauthorized to the CLI options +- Fixes for cloning +- Install modules in own component when deploying package by payload diff --git a/docs/technical-details/release-notes/4.tucker/4.5.0.md b/docs/technical-details/release-notes/4.tucker/4.5.0.md index 8155d22b..595b7e1e 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.0.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.0.md @@ -5,69 +5,90 @@ 3/13/2025 ### Blob Storage -4.5 introduces a new [Blob storage system](../../reference/blob.md), that is designed to efficiently handle large binary objects, with built-in support for streaming large content/media in and out of storage. This provides significantly better performance and functionality for large unstructured data, such as HTML, images, video, and other large files. Components can leverage this functionality through the JavaScript `Blob` interface, and the new `createBlob` function. Blobs are fully replicated and integrated. Harper can also coerce strings to `Blob`s (when dictated by the field type), making it feasible to use blobs for large string data, including with MQTT messaging. + +4.5 introduces a new [Blob storage system](../../reference/blob.md) that is designed to efficiently handle large binary objects, with built-in support for streaming large content/media in and out of storage. This provides significantly better performance and functionality for large unstructured data, such as HTML, images, video, and other large files. Components can leverage this functionality through the JavaScript `Blob` interface, and the new `createBlob` function. Blobs are fully replicated and integrated. Harper can also coerce strings to `Blob`s (when dictated by the field type), making it feasible to use blobs for large string data, including with MQTT messaging. A minimal sketch appears after the password hashing notes below. ### Password Hashing Upgrade + 4.5 adds two new password hashing algorithms for better security (to replace md5): `sha256`: This is a solid general-purpose password hashing algorithm, with good security properties and excellent performance. This is the default algorithm in 4.5. -`argon2id`: This provides the highest level of security, and is the recommended algorithm that do not require frequent password verifications. However, it is more CPU intensive, and may not be suitable for environments with a high frequency of password verifications. +`argon2id`: This provides the highest level of security, and is the recommended algorithm for deployments that do not require frequent password verifications. However, it is more CPU intensive, and may not be suitable for environments with a high frequency of password verifications.
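A minimal sketch of the Blob workflow described in the Blob Storage section above. The `Media` table, its Blob-typed `data` field, and the exact inputs accepted by `createBlob` are illustrative assumptions rather than the definitive API; see the linked Blob storage reference for the real contract.

```typescript
// Hypothetical sketch: storing large binary content with the 4.5 Blob storage.
// Assumptions: a `Media` table with a Blob-typed `data` attribute is defined in
// this application's schema, and createBlob accepts a Buffer (the reference
// docs describe the actual accepted inputs, e.g. streams).
import { createBlob, tables } from 'harperdb';

const { Media } = tables;

export async function storeVideo(id: string, bytes: Buffer): Promise<void> {
	const blob = await createBlob(bytes); // content is streamed into blob storage
	await Media.put({ id, data: blob }); // the record carries a replicated blob reference
}
```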
### Resource and Storage Analytics + 4.5 includes numerous new analytics for resources and storage, including page faults, context switches, free space, disk usage, and other metrics. ### Default Replication Port + The default port for replication has been changed from 9925 to 9933. ### Property Forwarding -Accessing record properties from resource instances should be accessible through standard property access syntax, regardless of whether the property was declared in a schema. Previously only properties declared in a schema were accessible through standard property access syntax. This change allows for more consistent and intuitive access to record properties, regardless of how they were defined. It is still recommended to declare properties in a schema for better performance and documentation. + +Record properties on resource instances should now be accessible through standard property access syntax, regardless of whether the property was declared in a schema. Previously only properties declared in a schema were accessible through standard property access syntax. This change allows for more consistent and intuitive access to record properties, regardless of how they were defined. It is still recommended to declare properties in a schema for better performance and documentation. ### Storage Reclamation + Harper now includes functionality for automatically trying to clean up and evict non-essential data when storage is running low. When free space drops below 40% (configurable), Harper will start to: -* Evict older entries from caching tables -* Evict older audit log entries -* Remove older rotated logs files -These efforts will become progressively more aggressive as free space decreases. + +- Evict older entries from caching tables +- Evict older audit log entries +- Remove older rotated log files + These efforts will become progressively more aggressive as free space decreases. ### Expanded Sharding Functionality + When sharding is being used, Harper can now honor write requests with residency information that will not be written to the local node's table. Harper also now allows nodes to be declaratively configured as part of a shard. ### Certificate Revocation + Certificates can now be revoked by configuring nodes with a list of revoked certificate serial numbers. ### Built-in `loadEnv` Component -There is a [new `loadEnv` component loader](../../../developers/components/built-in.md) that can be used to load environmental variables from a .env in a component. + +There is a [new `loadEnv` component loader](../../../developers/components/built-in.md) that can be used to load environment variables from a .env file in a component. ### Cluster Status Information + The [`cluster_status` operation](../../../developers/operations-api/clustering.md) now includes new statistics for replication, including the timestamps of last received transactions, sent transactions, and committed transactions. ### Improved URL path parsing -Resources can be defined with nested paths and directly accessed by the exact path without requiring a trailing slash. The `id.property` syntax for accessing properties in URLs will only be applied to properties that are declared in a schema. This allows for URLs to generally include dots in paths without being interpreted as property access. A new [`directURLMapping` option/flag](../../../deployments/configuration.md) on resources that allows for more direct URL path handling as well.
+ +Resources can be defined with nested paths and directly accessed by the exact path without requiring a trailing slash. The `id.property` syntax for accessing properties in URLs will only be applied to properties that are declared in a schema. This allows for URLs to generally include dots in paths without being interpreted as property access. A new [`directURLMapping` option/flag](../../../deployments/configuration.md) on resources allows for more direct URL path handling as well. ### `server.authenticateUser` API + In addition to the `server.getUser` API that allows for retrieval of users by username, the `server.authenticateUser` API is now available, which will _always_ verify the user by the provided password. ### Improved Message Delivery + Performance of delivery of messages has been improved. ### HTTP/2 + HarperDB now supports HTTP/2 for all API endpoints. This can be enabled with the `http2` option in the configuration file. ### `harperdb` symlink + Using `import from 'harperdb'` will more consistently work when directly running a component locally. ### Transaction Reuse + By default, transactions can now be reused after calling `transaction.commit()`. ### GraphQL configuration + The GraphQL query endpoint can be configured to listen on different ports. The GraphQL query endpoint is now also disabled by default, to avoid any conflicts. ### Glob support for components + Glob file handling for specifying files used by components has been improved for better consistency. ### Table.getRecordCount + `Table.getRecordCount()` is now available to get the number of records in a table (see the sketch at the end of these notes). ### Removal of record counts from REST API + Previously the root path for a resource in the REST API would return a record count. However, this is a significant performance hazard and was never documented to exist, so this has been removed to ensure better performance and reliability. -Note that downgrading from 4.5 to 4.4 is *not* supported. \ No newline at end of file +Note that downgrading from 4.5 to 4.4 is _not_ supported.
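A small sketch combining two of the 4.5 additions above, `server.authenticateUser` and `Table.getRecordCount`. The `Dog` table and both return shapes are illustrative assumptions; check the operations and globals references for the actual signatures.

```typescript
// Hypothetical sketch of two 4.5 additions. Assumptions: a `Dog` table exists,
// server.authenticateUser(username, password) resolves to a user object
// (rejecting on bad credentials), and Table.getRecordCount() resolves to an
// object carrying a recordCount property.
import { server, tables } from 'harperdb';

const { Dog } = tables;

export async function countDogsFor(username: string, password: string) {
	// Unlike server.getUser, this always verifies the supplied password
	const user = await server.authenticateUser(username, password);
	const { recordCount } = await Dog.getRecordCount();
	return { requestedBy: user.username, recordCount };
}
```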
diff --git a/docs/technical-details/release-notes/4.tucker/4.5.1.md b/docs/technical-details/release-notes/4.tucker/4.5.1.md index 95268c40..96743caa 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.1.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.1.md @@ -1,10 +1,11 @@ ### HarperDB 4.5.1 + 3/18/2025 -* Fix/implementation for sharding data that is written for cache resolution -* Add support for replication.shard in configuration for defining local node's shard id -* Fix for source map handling in stack traces -* Improved error reporting for syntax errors in component code -* Improved logging on deployment and NPM installation -* Added shard information to cluster_status -* Fix for audit entry eviction when a table is deleted \ No newline at end of file +- Fix/implementation for sharding data that is written for cache resolution +- Add support for replication.shard in configuration for defining the local node's shard id +- Fix for source map handling in stack traces +- Improved error reporting for syntax errors in component code +- Improved logging on deployment and NPM installation +- Added shard information to cluster_status +- Fix for audit entry eviction when a table is deleted diff --git a/docs/technical-details/release-notes/4.tucker/4.5.10.md b/docs/technical-details/release-notes/4.tucker/4.5.10.md index 5a6a902e..5e360f6e 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.10.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.10.md @@ -1,5 +1,6 @@ ### HarperDB 4.5.10 + 5/20/2025 -* Expose the `resources` map for being able to set and access custom resources -* Fix for cleaning up blob files that are used when a database is deleted \ No newline at end of file +- Expose the `resources` map for being able to set and access custom resources +- Fix for cleaning up blob files that are used when a database is deleted diff --git a/docs/technical-details/release-notes/4.tucker/4.5.2.md b/docs/technical-details/release-notes/4.tucker/4.5.2.md index e6e5fa88..624416e8 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.2.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.2.md @@ -1,7 +1,8 @@ ### HarperDB 4.5.2 + 3/25/2025 -* For defined schemas, don't allow updates from remote nodes that could cause conflicts and repeated schema change requests -* New harper-chrome docker container for accessing Chrome binaries for use with tools like Puppeteer -* Improved rolling restart handling of errors with reaching individual nodes -* Defined cleaner operation object to avoid accident leaking of credentials with logging \ No newline at end of file +- For defined schemas, don't allow updates from remote nodes that could cause conflicts and repeated schema change requests +- New harper-chrome docker container for accessing Chrome binaries for use with tools like Puppeteer +- Improved rolling restart handling of errors when reaching individual nodes +- Defined a cleaner operation object to avoid accidental leaking of credentials with logging diff --git a/docs/technical-details/release-notes/4.tucker/4.5.3.md b/docs/technical-details/release-notes/4.tucker/4.5.3.md index d87a4c72..04f5d25e 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.3.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.3.md @@ -1,5 +1,6 @@ ### HarperDB 4.5.3 + 4/3/2025 -* Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key -* Fix for analytics of storage size when tables are
deleted \ No newline at end of file +- Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key +- Fix for analytics of storage size when tables are deleted diff --git a/docs/technical-details/release-notes/4.tucker/4.5.4.md b/docs/technical-details/release-notes/4.tucker/4.5.4.md index 600312c4..0029dd1e 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.4.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.4.md @@ -1,6 +1,7 @@ ### HarperDB 4.5.4 + 4/11/2025 -* Fix for replication of (non-retained) published messages -* Make cookie domain be configurable to allow for cookies shared across sub-hostnames -* Fix for on-demand loading of shared blobs \ No newline at end of file +- Fix for replication of (non-retained) published messages +- Make the cookie domain configurable to allow for cookies shared across sub-hostnames +- Fix for on-demand loading of shared blobs diff --git a/docs/technical-details/release-notes/4.tucker/4.5.5.md b/docs/technical-details/release-notes/4.tucker/4.5.5.md index c80e3867..3b93b046 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.5.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.5.md @@ -1,5 +1,6 @@ ### HarperDB 4.5.5 + 4/15/2025 -* Updates for better messaging with symlinks in Windows -* Fix for saving replicated blobs \ No newline at end of file +- Updates for better messaging with symlinks on Windows +- Fix for saving replicated blobs diff --git a/docs/technical-details/release-notes/4.tucker/4.5.6.md b/docs/technical-details/release-notes/4.tucker/4.5.6.md index dc02ec29..0f26c1de 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.6.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.6.md @@ -1,6 +1,7 @@ ### HarperDB 4.5.6 + 4/17/2025 -* Fix for changing the type of the primary key attribute -* Added a new `includeExpensiveRecordCountEstimates` property to the REST component for returning record count estimates -* Fix for dropping attributes \ No newline at end of file +- Fix for changing the type of the primary key attribute +- Added a new `includeExpensiveRecordCountEstimates` property to the REST component for returning record count estimates +- Fix for dropping attributes diff --git a/docs/technical-details/release-notes/4.tucker/4.5.7.md b/docs/technical-details/release-notes/4.tucker/4.5.7.md index cdd47441..f4481712 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.7.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.7.md @@ -1,5 +1,6 @@ ### HarperDB 4.5.7 + 4/23/2025 -* Fix for handling buffers from replicated sharded blob records to prevent overwriting while using -* Updated included studio version for fix for logging in \ No newline at end of file +- Fix for handling buffers from replicated sharded blob records to prevent overwriting while in use +- Updated the included studio version with a fix for logging in diff --git a/docs/technical-details/release-notes/4.tucker/4.5.8.md b/docs/technical-details/release-notes/4.tucker/4.5.8.md index fa105814..80482d46 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.8.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.8.md @@ -1,7 +1,8 @@ ### HarperDB 4.5.8 + 4/30/2025 -* Fix MQTT subscription topics with trailing slashes to ensure they are not treated as a wildcard -* Fix the arguments that are used for the default connect/subscribe calls so they pass the second argument from connect like `connect(incomingMessages, query) ->
subscribe(query)` -* Add support for replication connections using any configured certificate authorities to verify the server certificates -* Added more descriptive error messages on errors in user residency functions \ No newline at end of file +- Fix MQTT subscription topics with trailing slashes to ensure they are not treated as a wildcard +- Fix the arguments that are used for the default connect/subscribe calls so they pass the second argument from connect like `connect(incomingMessages, query) -> subscribe(query)` +- Add support for replication connections using any configured certificate authorities to verify the server certificates +- Added more descriptive error messages on errors in user residency functions diff --git a/docs/technical-details/release-notes/4.tucker/4.5.9.md b/docs/technical-details/release-notes/4.tucker/4.5.9.md index be025f19..c7c972ed 100644 --- a/docs/technical-details/release-notes/4.tucker/4.5.9.md +++ b/docs/technical-details/release-notes/4.tucker/4.5.9.md @@ -1,4 +1,5 @@ ### HarperDB 4.5.9 + 5/14/2025 -* Remove --no-bin-links directive for NPM that was causing installs of dependencies to fail \ No newline at end of file +- Remove --no-bin-links directive for NPM that was causing installs of dependencies to fail diff --git a/docs/technical-details/release-notes/4.tucker/4.6.0.md b/docs/technical-details/release-notes/4.tucker/4.6.0.md index 811ddad8..426c8aa4 100644 --- a/docs/technical-details/release-notes/4.tucker/4.6.0.md +++ b/docs/technical-details/release-notes/4.tucker/4.6.0.md @@ -5,12 +5,15 @@ 6/13/2025 ### Vector Indexing: Hierarchical Navigable Small World + Harper 4.6 now includes support for vector indexing, which allows for efficient and fast queries on large semantic data sets. Vector indexing is powered by the [Hierarchical Navigable Small World (HNSW) algorithm](https://arxiv.org/abs/1603.09320) and can be used to index any vector-valued property, and is particularly useful for vector text-embedding data. This provides powerful, efficient vector-based searching for semantic and AI-based querying functionality. HNSW is a preferred algorithm for vector indexing and searching because it provides an excellent balance of recall and performance. ### New Extension API with support for dynamic reloading + 4.6 introduces a new extension API with significant ergonomic improvements for creating new extension components that are more robust and dynamic. The new API also provides a mechanism for dynamic reloading of some files and configuration without restarts. ### Logging Improvements + 4.6 includes significant expansions to logging configurability, allowing for specific logging configurations of individual components. This also leverages the new extension API to allow for dynamic reloading of logging configuration. With the more granular logging, logs can be directed to different files and/or different log levels. The logger includes support for HTTP logging, with configurability for logging standard HTTP methods and paths, as well as headers, ids, and timing information. It also supports distinct logging configuration for different components. The new logger is now based on the Node.js Console API, with improved formatting of log messages for various types of objects. @@ -21,6 +24,7 @@ An important change is that logging to standard out/error will _not_ include the 4.6 includes a new [data loader](../../../developers/applications/data-loader.md) that can be used to load data into HarperDB as part of a component.
The data loader can be used to load data from a JSON file and can be deployed and distributed with a component to provide a reliable mechanism for ensuring specific records are loaded into Harper. ### Resource API Upgrades + 4.6 includes an upgraded form of the Resource API that can be opted into, with significant improvements in ease of use. ### only-if-cached behavior diff --git a/docs/technical-details/release-notes/4.tucker/README.md b/docs/technical-details/release-notes/4.tucker/README.md index e5619434..d703d9e2 100644 --- a/docs/technical-details/release-notes/4.tucker/README.md +++ b/docs/technical-details/release-notes/4.tucker/README.md @@ -3,45 +3,47 @@ HarperDB version 4 ([Tucker release](tucker.md)) represents major step forward in database technology. This release line has ground-breaking architectural advancements including: ## [4.6](4.6.0.md) -* Vector Indexing - 4.6 introduces a new Vector Indexing system based on Hierarchical Navigable Small World Graphs. -* New extension API - 4.6 introduces a new extension API for creating extensions components. -* Improved logging configurability - Logging can be dynamically updated and specifically configured for each component. -* Resource API - 4.6 has updated Resource APIs for ease of use. -* Data loader - 4.6 introduces a new data loader that allows for ensuring records exist as part of a component. + +- Vector Indexing - 4.6 introduces a new Vector Indexing system based on Hierarchical Navigable Small World Graphs. +- New extension API - 4.6 introduces a new extension API for creating extension components. +- Improved logging configurability - Logging can be dynamically updated and specifically configured for each component. +- Resource API - 4.6 has updated Resource APIs for ease of use. +- Data loader - 4.6 introduces a new data loader that allows for ensuring records exist as part of a component. ## [4.5](4.5.0.md) -* Blob Storage - 4.5 introduces a new [Blob storage system](../../reference/blob.md). -* Password Hashing Upgrade - two new password hashing algorithms for better security (to replace md5). -* New resource and storage Analytics + +- Blob Storage - 4.5 introduces a new [Blob storage system](../../reference/blob.md). +- Password Hashing Upgrade - two new password hashing algorithms for better security (to replace md5).
+- New resource and storage Analytics ## [4.4](4.4.0.md) -* Native replication (codename "Plexus") which is faster, more efficient, secure, and reliable than the previous replication system and provides provisional sharding capabilities with a foundation for the future -* Computed properties that allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value -* Custom indexing including composite, full-text indexing, and vector indexing +- Native replication (codename "Plexus") which is faster, more efficient, secure, and reliable than the previous replication system and provides provisional sharding capabilities with a foundation for the future +- Computed properties that allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value +- Custom indexing including composite, full-text indexing, and vector indexing ## [4.3](4.3.0.md) -* Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations -* More advanced transaction support for CRDTs and storage of large integers (with BigInt) -* Better management with new upgraded local studio and new CLI features +- Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations +- More advanced transaction support for CRDTs and storage of large integers (with BigInt) +- Better management with new upgraded local studio and new CLI features ## [4.2](4.2.0.md) -* New component architecture and Resource API for advanced, robust custom database application development -* Real-time capabilites through MQTT, WebSockets, and Server-Sent Events -* REST interface for intuitive, fast, and standards-compliant HTTP interaction -* Native caching capabilities for high-performance cache scenarios -* Clone node functionality +- New component architecture and Resource API for advanced, robust custom database application development +- Real-time capabilities through MQTT, WebSockets, and Server-Sent Events +- REST interface for intuitive, fast, and standards-compliant HTTP interaction +- Native caching capabilities for high-performance cache scenarios +- Clone node functionality ## [4.1](4.1.0.md) -* New streaming iterators mechanism that allows query results to be delivered to clients _while_ querying results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery -* New thread-based concurrency model for more efficient resource usage +- New streaming iterators mechanism that allows query results to be delivered to clients _while_ querying results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery +- New thread-based concurrency model for more efficient resource usage ## [4.0](4.0.0.md) -* New clustering technology that delivers robust, resilient and high-performance replication -* Major storage improvements with highly-efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities +- New clustering technology that delivers robust, resilient and high-performance replication +- Major storage improvements with highly-efficient
adaptive-structure modified MessagePack format, with on-demand deserialization capabilities Did you know our release names are dedicated to employee pups? For our fourth release, [meet Tucker!](tucker.md) diff --git a/docs/technical-details/release-notes/README.md b/docs/technical-details/release-notes/README.md index 05de0977..ad00ce70 100644 --- a/docs/technical-details/release-notes/README.md +++ b/docs/technical-details/release-notes/README.md @@ -236,7 +236,7 @@ [3.0.0 Monkey](3.monkey/3.0.0.md) -*** +--- [Meet Penny](2.penny/) Our 2nd Release Pup @@ -252,7 +252,7 @@ [2.1.1 Penny](2.penny/2.1.1.md) -*** +--- [Meet Alby](1.alby/) Our 1st Release Pup diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..9624dbd0 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,56 @@ +{ + "name": "@harperdb/documentation", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@harperdb/documentation", + "devDependencies": { + "@harperdb/code-guidelines": "^0.0.2", + "prettier": "^3.6.1" + } + }, + "node_modules/@harperdb/code-guidelines": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@harperdb/code-guidelines/-/code-guidelines-0.0.2.tgz", + "integrity": "sha512-EJguPZCQr3sNLqUGE5mr5i37g2J1aJEeGb7kkppJCXmLClOOn3v6nwx0FxaSyS/D+Iry6WJpb3qIWgu7Z1Yjvg==", + "dev": true, + "license": "MIT", + "dependencies": { + "prettier": "3.3.3" + } + }, + "node_modules/@harperdb/code-guidelines/node_modules/prettier": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", + "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.1.tgz", + "integrity": "sha512-5xGWRa90Sp2+x1dQtNpIpeOQpTDBs9cZDmA/qs2vDNN2i18PdapqY7CmBeyLlMuGqXJRIOPaCaVZTLNQRWUH/A==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..708cad5e --- /dev/null +++ b/package.json @@ -0,0 +1,12 @@ +{ + "name": "@harperdb/documentation", + "private": true, + "scripts": { + "format": "prettier --write 'docs/**/*' package.json" + }, + "devDependencies": { + "@harperdb/code-guidelines": "^0.0.2", + "prettier": "^3.6.1" + }, + "prettier": "@harperdb/code-guidelines/prettier" +}