diff --git a/.gitbook.yaml b/.gitbook.yaml deleted file mode 100644 index ad136a9d..00000000 --- a/.gitbook.yaml +++ /dev/null @@ -1,119 +0,0 @@ -root: ./docs/ - -redirects: - developers/operations-api/utilities: developers/operations-api/system-operations.md - install-harperdb: deployments/install-harper/README.md - install-harperdb/linux: deployments/install-harper/linux.md - install-harperdb/other: deployments/install-harper/README.md - install-harperdb/docker: deployments/install-harper/README.md - install-harperdb/mac: deployments/install-harper/README.md - install-harperdb/windows: deployments/install-harper/README.md - install-harperdb/linux-quickstart: deployments/install-harper/linux.md - install-harperdb/offline: deployments/install-harper/README.md - install-harperdb/node-ver-requirement: deployments/install-harper/README.md - deployments/install-harperdb: deployments/install-harper/README.md - deployments/install-harperdb/linux: deployments/install-harper/linux.md - harperdb-studio: administration/harperdb-studio/README.md - harperdb-studio/create-account: administration/harperdb-studio/create-account.md - harperdb-studio/login-password-reset: administration/harperdb-studio/login-password-reset.md - harperdb-studio/resources: administration/harperdb-studio/resources.md - harperdb-studio/organizations: administration/harperdb-studio/organizations.md - harperdb-studio/instances: administration/harperdb-studio/instances.md - harperdb-studio/query-instance-data: administration/harperdb-studio/query-instance-data.md - harperdb-studio/manage-schemas-browse-data: administration/harperdb-studio/manage-databases-browse-data.md - harperdb-studio/manage-charts: administration/harperdb-studio/manage-charts.md - harperdb-studio/manage-clustering: administration/harperdb-studio/manage-replication.md - harperdb-studio/manage-instance-users: administration/harperdb-studio/manage-instance-users.md - harperdb-studio/manage-instance-roles: administration/harperdb-studio/manage-instance-users.md - harperdb-studio/manage-functions: administration/harperdb-studio/manage-applications.md - harperdb-studio/instance-metrics: administration/harperdb-studio/instance-metrics.md - harperdb-studio/instance-configuration: administration/harperdb-studio/instance-configuration.md - harperdb-studio/enable-mixed-content: administration/harperdb-studio/enable-mixed-content.md - administration/harperdb-studio: administration/harper-studio/README.md - administration/harperdb-studio/create-account: administration/harper-studio/create-account.md - administration/harperdb-studio/enable-mixed-content: administration/harper-studio/enable-mixed-content.md - administration/harperdb-studio/instance-configuration: administration/harper-studio/instance-configuration.md - administration/harperdb-studio/instance-metrics: administration/harper-studio/instance-metrics.md - administration/harperdb-studio/instances: administration/harper-studio/instances.md - administration/harperdb-studio/login-password-reset: administration/harper-studio/login-password-reset.md - administration/harperdb-studio/manage-applications: administration/harper-studio/manage-applications.md - administration/harperdb-studio/manage-charts: administration/harper-studio/README.md - administration/harperdb-studio/manage-databases-browse-data: administration/harper-studio/manage-databases-browse-data.md - administration/harperdb-studio/manage-instance-roles: administration/harper-studio/manage-instance-roles.md - 
administration/harperdb-studio/manage-instance-users: administration/harper-studio/manage-instance-users.md - administration/harperdb-studio/manage-replication: administration/harper-studio/manage-replication.md - administration/harperdb-studio/organizations: administration/harper-studio/organizations.md - harperdb-cloud: deployments/harper-cloud/README.md - harperdb-cloud/iops-impact: deployments/harper-cloud/iops-impact.md - harperdb-cloud/instance-size-hardware-specs: deployments/harper-cloud/instance-size-hardware-specs.md - harperdb-cloud/alarms: deployments/harper-cloud/alarms.md - harperdb-cloud/verizon-5g-wavelength-instances: deployments/harper-cloud/verizon-5g-wavelength-instances.md - deployments/harperdb-cloud: deployments/harper-cloud/README.md - deployments/harperdb-cloud/iops-impact: deployments/harper-cloud/iops-impact.md - deployments/harperdb-cloud/instance-size-hardware-specs: deployments/harper-cloud/instance-size-hardware-specs.md - deployments/harperdb-cloud/alarms: deployments/harper-cloud/alarms.md - deployments/harperdb-cloud/verizon-5g-wavelength-instances: deployments/harper-cloud/verizon-5g-wavelength-instances.md - security: developers/security/README.md - security/jwt-auth: developers/security/jwt-auth.md - security/basic-auth: developers/security/basic-auth.md - security/configuration: developers/security/configuration.md) - security/users-and-roles: developers/security/users-and-roles.md) - clustering: developers/clustering/README.md - clustering/requirements-and-definitions: developers/clustering/requirements-and-definitions.md - clustering/creating-a-cluster-user: developers/clustering/creating-a-cluster-user.md - clustering/naming-a-node: developers/clustering/naming-a-node.md - clustering/enabling-clustering: developers/clustering/enabling-clustering.md - clustering/establishing-routes: developers/clustering/establishing-routes.md - clustering/subscription-overview: developers/clustering/subscription-overview.md - clustering/managing-subscriptions: developers/clustering/managing-subscriptions.md - clustering/things-worth-knowing: developers/clustering/things-worth-knowing.md - custom-functions: developers/applications/README.md - custom-functions/define-routes: developers/applications/define-routes.md - custom-functions/using-npm-git: developers/custom-functions/create-project.md - custom-functions/custom-functions-operations: developers/operations-api/README.md - custom-functions/debugging-custom-function: developers/applications/debugging.md - custom-functions/example-projects: developers/applications/example-projects.md - add-ons-and-sdks: developers/applications/README.md - add-ons-and-sdks/google-data-studio: developers/miscellaneous/google-data-studio.md - sql-guide: developers/sql-guide/README.md - sql-guide/features-matrix: developers/sql-guide/features-matrix.md - sql-guide/insert: developers/sql-guide/README.md - sql-guide/update: developers/sql-guide/README.md - sql-guide/delete: developers/sql-guide/README.md - sql-guide/select: developers/sql-guide/README.md - sql-guide/joins: developers/sql-guide/README.md - sql-guide/date-functions: developers/sql-guide/date-functions.md - sql-guide/reserved-word: developers/sql-guide/reserved-word.md - sql-guide/functions: developers/sql-guide/functions.md - sql-guide/json-search: developers/sql-guide/json-search.md - sql-guide/sql-geospatial-functions: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geoarea: developers/sql-guide/sql-geospatial-functions.md - 
sql-guide/sql-geospatial-functions/geolength: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geodifference: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geodistance: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geonear: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geocontains: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geoequal: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geocrosses: developers/sql-guide/sql-geospatial-functions.md - sql-guide/sql-geospatial-functions/geoconvert: developers/sql-guide/sql-geospatial-functions.md - harperdb-cli: deployments/harper-cli.md - deployments/harperdb-cli: deployments/harper-cli.md - configuration: deployments/configuration.md - logging: administration/logging/logging.md - transaction-logging: administration/logging/transaction-logging.md - audit-logging: administration/logging/audit-logging.md - jobs: administration/jobs.md - upgrade-hdb-instance: deployments/upgrade-hdb-instance.md - reference: technical-details/reference/README.md - reference/storage-algorithm: technical-details/reference/storage-algorithm.md - reference/dynamic-schema: technical-details/reference/dynamic-schema.md - reference/data-types: technical-details/reference/data-types.md - reference/content-types: technical-details/reference/content-types.md - reference/headers: technical-details/reference/headers.md - reference/limits: technical-details/reference/limits.md - release-notes: technical-details/release-notes/README.md - operations-api: developers/operations-api/README.md - rest: developers/rest.md - api: developers/operations-api/README.md - administration/harperdb-studio/query-instance-data: developers/rest.md - diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml new file mode 100644 index 00000000..175d5cff --- /dev/null +++ b/.github/workflows/deploy.yaml @@ -0,0 +1,101 @@ +name: Deploy Docusaurus to GitHub Pages + +on: + # Trigger the workflow on pull requests and pushes to specific branches + pull_request: + push: + branches: + - main + - docs-in-hdb + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these deployments to complete. +# This shouldn't be necessary for most cases, but it can help avoid conflicts if multiple pushes happen in quick succession. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build: + name: Build Docusaurus + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-node@v4 + with: + node-version: '22' + cache: 'npm' + cache-dependency-path: 'package-lock.json' + + - name: Debug - Show directory structure + run: | + echo "Current directory: $(pwd)" + echo "Repository root contents:" + ls -la + echo "Site directory contents:" + ls -la site/ || echo "Site directory not found" + echo "Looking for package.json files:" + find . -name "package.json" -type f + + - name: Install root dependencies + run: | + echo "Installing root dependencies from $(pwd)" + npm ci || (echo "Root npm ci failed, uploading logs" && exit 1) + + - name: Install site dependencies + run: | + echo "Installing site dependencies..." 
+ npm run site:install || (echo "Site install failed" && exit 1) + + - name: Build Docusaurus site + env: + DOCUSAURUS_ROUTE_BASE_PATH: ${{ vars.DOCUSAURUS_ROUTE_BASE_PATH }} + DOCUSAURUS_BASE_URL: ${{ vars.DOCUSAURUS_BASE_URL }} + DOCUSAURUS_URL: ${{ vars.DOCUSAURUS_URL }} + IMAGES_PATH: ${{ vars.IMAGES_PATH }} + run: | + echo "Building Docusaurus site..." + echo "Using DOCUSAURUS_ROUTE_BASE_PATH: $DOCUSAURUS_ROUTE_BASE_PATH" + echo "Using DOCUSAURUS_BASE_URL: $DOCUSAURUS_BASE_URL" + echo "Using DOCUSAURUS_URL: $DOCUSAURUS_URL" + echo "Using IMAGES_PATH: $IMAGES_PATH" + npm run site:build || (echo "Site build failed" && exit 1) + + - name: Upload npm logs on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: npm-logs + path: | + ~/.npm/_logs/ + + - name: Upload Build Artifact + uses: actions/upload-pages-artifact@v3 + with: + path: site/build + + deploy: + needs: build + name: Deploy to GitHub Pages + runs-on: ubuntu-latest + # Only deploy on push to specific branches, not on PRs + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/docs-in-hdb') + + permissions: + pages: write + id-token: write + + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + + + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 00000000..ae27f845 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,2 @@ +# Keep this file minimized +site/static/js/reo.js diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 150789fc..00000000 --- a/docs/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Harper Docs - -{% hint style="info" %} -[Connect with our team!](https://www.harpersystems.dev/contact) -{% endhint %} - -Welcome to the Harper Documentation! Here, you'll find all things Harper, and everything you need to get started, troubleshoot issues, and make the most of our platform. - -## Getting Started - -
-- [Install Harper](install-harper): Pick the installation method that best suits your environment
-- [What is Harper](what-is-harper): Learn about Harper, how it works, and some of its use cases
-- [Harper Concepts](harper-concepts): Learn about Harper's fundamental concepts and how they interact
-
-## Building with Harper
-
-- [Harper Applications](applications): Build a fully featured Harper Component with custom functionality
-- [REST Queries](rest.md): The recommended HTTP interface for data access, querying, and manipulation
-- [Operations API](operations-api): Configure, deploy, administer, and control your Harper instance
-- [Clustering & Replication](clustering): The process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns.
-- [Explore the Harper Studio](harper-studio): The web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface.
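Reviewer note before the `SUMMARY.md` diff below: the new deploy workflow above calls `npm run site:install` and `npm run site:build`, but the root `package.json` scripts themselves are not part of this diff. A minimal sketch of what they plausibly do, assuming the Docusaurus app lives in `site/` (the workflow uploads `site/build` as the Pages artifact):

```bash
# Assumed equivalents of the root package.json scripts invoked by the workflow.
# The actual script bodies are not shown in this diff; treat these as placeholders.
cd site && npm ci   # plausible body of "npm run site:install"
npm run build       # plausible body of "npm run site:build", emitting site/build
```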
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md deleted file mode 100644 index d1c98476..00000000 --- a/docs/SUMMARY.md +++ /dev/null @@ -1,271 +0,0 @@ -# Table of contents - -* [Harper Docs](README.md) -* [Getting Started](getting-started/README.md) - * [What is Harper](getting-started/what-is-harper.md) - * [Install Harper](getting-started/install-harper.md) - * [Harper Concepts](getting-started/harper-concepts.md) - * [Create Your First Application](getting-started/first-harper-app.md) - -## Developers - -* [Applications](developers/applications/README.md) - * [Caching](developers/applications/caching.md) - * [Defining Schemas](developers/applications/defining-schemas.md) - * [Defining Roles](developers/applications/defining-roles.md) - * [Data Loader](developers/applications/data-loader.md) - * [Debugging Applications](developers/applications/debugging.md) - * [Define Fastify Routes](developers/applications/define-routes.md) - * [Web Applications](developers/applications/web-applications.md) - * [Example Projects](developers/applications/example-projects.md) -* [REST](developers/rest.md) -* [Operations API](developers/operations-api/README.md) - * [Quick Start Examples](developers/operations-api/quickstart-examples.md) - * [Databases and Tables](developers/operations-api/databases-and-tables.md) - * [NoSQL Operations](developers/operations-api/nosql-operations.md) - * [Bulk Operations](developers/operations-api/bulk-operations.md) - * [Users and Roles](developers/operations-api/users-and-roles.md) - * [Clustering](developers/operations-api/clustering.md) - * [Clustering with NATS](developers/operations-api/clustering-nats.md) - * [Components](developers/operations-api/components.md) - * [Registration](developers/operations-api/registration.md) - * [Jobs](developers/operations-api/jobs.md) - * [Logs](developers/operations-api/logs.md) - * [System Operations](developers/operations-api/system-operations.md) - * [Configuration](developers/operations-api/configuration.md) - * [Certificate Management](developers/operations-api/certificate-management.md) - * [Token Authentication](developers/operations-api/token-authentication.md) - * [SQL Operations](developers/operations-api/sql-operations.md) - * [Advanced JSON SQL Examples](developers/operations-api/advanced-json-sql-examples.md) - * [Analytics](developers/operations-api/analytics.md) -* [Real-Time](developers/real-time.md) -* [Replication/Clustering](developers/replication/README.md) - * [Sharding](developers/replication/sharding.md) - * [Legacy NATS Clustering](developers/clustering/README.md) - * [Requirements and Definitions](developers/clustering/requirements-and-definitions.md) - * [Creating A Cluster User](developers/clustering/creating-a-cluster-user.md) - * [Naming A Node](developers/clustering/naming-a-node.md) - * [Enabling Clustering](developers/clustering/enabling-clustering.md) - * [Establishing Routes](developers/clustering/establishing-routes.md) - * [Subscription Overview](developers/clustering/subscription-overview.md) - * [Managing Subscriptions](developers/clustering/managing-subscriptions.md) - * [Things Worth Knowing](developers/clustering/things-worth-knowing.md) - * [Certificate Management](developers/clustering/certificate-management.md) -* [Security](developers/security/README.md) - * [JWT Authentication](developers/security/jwt-auth.md) - * [Basic Authentication](developers/security/basic-auth.md) - * [mTLS Authentication](developers/security/mtls-auth.md) - * [Configuration](developers/security/configuration.md) 
- * [Users & Roles](developers/security/users-and-roles.md) - * [Certificate Management](developers/security/certificate-management.md) -* [SQL Guide](developers/sql-guide/README.md) - * [SQL Features Matrix](developers/sql-guide/features-matrix.md) - * [SQL Date Functions](developers/sql-guide/date-functions.md) - * [SQL Reserved Word](developers/sql-guide/reserved-word.md) - * [SQL Functions](developers/sql-guide/functions.md) - * [SQL JSON Search](developers/sql-guide/json-search.md) - * [SQL Geospatial Functions](developers/sql-guide/sql-geospatial-functions.md) -* [Miscellaneous](developers/miscellaneous/README.md) - * [Google Data Studio](developers/miscellaneous/google-data-studio.md) - * [SDKs](developers/miscellaneous/sdks.md) - * [Query Optimization](developers/miscellaneous/query-optimization.md) - -## Administration - -* [Best Practices and Recommendations](administration/administration.md) -* [Logging](administration/logging/README.md) - * [Standard Logging](administration/logging/logging.md) - * [Audit Logging](administration/logging/audit-logging.md) - * [Transaction Logging](administration/logging/transaction-logging.md) -* [Clone Node](administration/cloning.md) -* [Compact](administration/compact.md) -* [Jobs](administration/jobs.md) -* [Harper Studio](administration/harper-studio/README.md) - * [Create an Account](administration/harper-studio/create-account.md) - * [Log In & Password Reset](administration/harper-studio/login-password-reset.md) - * [Organizations](administration/harper-studio/organizations.md) - * [Instances](administration/harper-studio/instances.md) - * [Query Instance Data](administration/harper-studio/query-instance-data.md) - * [Manage Databases / Browse Data](administration/harper-studio/manage-databases-browse-data.md) - * [Manage Clustering](administration/harper-studio/manage-replication.md) - * [Manage Instance Users](administration/harper-studio/manage-instance-users.md) - * [Manage Instance Roles](administration/harper-studio/manage-instance-roles.md) - * [Manage Applications](administration/harper-studio/manage-applications.md) - * [Instance Metrics](administration/harper-studio/instance-metrics.md) - * [Instance Configuration](administration/harper-studio/instance-configuration.md) - * [Enable Mixed Content](administration/harper-studio/enable-mixed-content.md) - -## Deployments - -* [Configuration File](deployments/configuration.md) -* [Harper CLI](deployments/harper-cli.md) -* [Install Harper](deployments/install-harper/README.md) - * [On Linux](deployments/install-harper/linux.md) -* [Upgrade a Harper Instance](deployments/upgrade-hdb-instance.md) -* [Harper Cloud](deployments/harper-cloud/README.md) - * [IOPS Impact on Performance](deployments/harper-cloud/iops-impact.md) - * [Instance Size Hardware Specs](deployments/harper-cloud/instance-size-hardware-specs.md) - * [Alarms](deployments/harper-cloud/alarms.md) - * [Verizon 5G Wavelength](deployments/harper-cloud/verizon-5g-wavelength-instances.md) - -## Technical Details - -* [Reference](technical-details/reference/README.md) - * [Analytics](technical-details/reference/analytics.md) - * [Architecture](technical-details/reference/architecture.md) - * [Blob](technical-details/reference/blob.md) - * [Content Types](technical-details/reference/content-types.md) - * [Components](technical-details/reference/components/README.md) - * [Applications](technical-details/reference/components/applications.md) - * [Built-In Extensions](technical-details/reference/components/built-in-extensions.md) - 
* [Configuration](technical-details/reference/components/configuration.md) - * [Extensions](technical-details/reference/components/extensions.md) - * [(Experimental) Plugins](technical-details/reference/components/plugins.md) - * [Data Types](technical-details/reference/data-types.md) - * [Dynamic Schema](technical-details/reference/dynamic-schema.md) - * [Globals](technical-details/reference/globals.md) - * [GraphQL](technical-details/reference/graphql.md) - * [Headers](technical-details/reference/headers.md) - * [Limits](technical-details/reference/limits.md) - * [Resources](technical-details/reference/resources/README.md) - * [Migration](technical-details/reference/resources/migration.md) - * [Instance Binding](technical-details/reference/resources/instance-binding.md) - * [Storage Algorithm](technical-details/reference/storage-algorithm.md) - * [Transactions](technical-details/reference/transactions.md) -* [Release Notes](technical-details/release-notes/README.md) - * [Harper Tucker (Version 4)](technical-details/release-notes/4.tucker/README.md) - * [4.6.3](technical-details/release-notes/4.tucker/4.6.3.md) - * [4.6.2](technical-details/release-notes/4.tucker/4.6.2.md) - * [4.6.1](technical-details/release-notes/4.tucker/4.6.1.md) - * [4.6.0](technical-details/release-notes/4.tucker/4.6.0.md) - * [4.5.16](technical-details/release-notes/4.tucker/4.5.16.md) - * [4.5.15](technical-details/release-notes/4.tucker/4.5.15.md) - * [4.5.14](technical-details/release-notes/4.tucker/4.5.14.md) - * [4.5.13](technical-details/release-notes/4.tucker/4.5.13.md) - * [4.5.12](technical-details/release-notes/4.tucker/4.5.12.md) - * [4.5.11](technical-details/release-notes/4.tucker/4.5.11.md) - * [4.5.10](technical-details/release-notes/4.tucker/4.5.10.md) - * [4.5.9](technical-details/release-notes/4.tucker/4.5.9.md) - * [4.5.8](technical-details/release-notes/4.tucker/4.5.8.md) - * [4.5.7](technical-details/release-notes/4.tucker/4.5.7.md) - * [4.5.6](technical-details/release-notes/4.tucker/4.5.6.md) - * [4.5.5](technical-details/release-notes/4.tucker/4.5.5.md) - * [4.5.4](technical-details/release-notes/4.tucker/4.5.4.md) - * [4.5.3](technical-details/release-notes/4.tucker/4.5.3.md) - * [4.5.2](technical-details/release-notes/4.tucker/4.5.2.md) - * [4.5.1](technical-details/release-notes/4.tucker/4.5.1.md) - * [4.5.0](technical-details/release-notes/4.tucker/4.5.0.md) - * [4.4.24](technical-details/release-notes/4.tucker/4.4.24.md) - * [4.4.23](technical-details/release-notes/4.tucker/4.4.23.md) - * [4.4.22](technical-details/release-notes/4.tucker/4.4.22.md) - * [4.4.21](technical-details/release-notes/4.tucker/4.4.21.md) - * [4.4.20](technical-details/release-notes/4.tucker/4.4.20.md) - * [4.4.19](technical-details/release-notes/4.tucker/4.4.19.md) - * [4.4.18](technical-details/release-notes/4.tucker/4.4.18.md) - * [4.4.17](technical-details/release-notes/4.tucker/4.4.17.md) - * [4.4.16](technical-details/release-notes/4.tucker/4.4.16.md) - * [4.4.15](technical-details/release-notes/4.tucker/4.4.15.md) - * [4.4.14](technical-details/release-notes/4.tucker/4.4.14.md) - * [4.4.13](technical-details/release-notes/4.tucker/4.4.13.md) - * [4.4.12](technical-details/release-notes/4.tucker/4.4.12.md) - * [4.4.11](technical-details/release-notes/4.tucker/4.4.11.md) - * [4.4.10](technical-details/release-notes/4.tucker/4.4.10.md) - * [4.4.9](technical-details/release-notes/4.tucker/4.4.9.md) - * [4.4.8](technical-details/release-notes/4.tucker/4.4.8.md) - * 
[4.4.7](technical-details/release-notes/4.tucker/4.4.7.md) - * [4.4.6](technical-details/release-notes/4.tucker/4.4.6.md) - * [4.4.5](technical-details/release-notes/4.tucker/4.4.5.md) - * [4.4.4](technical-details/release-notes/4.tucker/4.4.4.md) - * [4.4.3](technical-details/release-notes/4.tucker/4.4.3.md) - * [4.4.2](technical-details/release-notes/4.tucker/4.4.2.md) - * [4.4.1](technical-details/release-notes/4.tucker/4.4.1.md) - * [4.4.0](technical-details/release-notes/4.tucker/4.4.0.md) - * [4.3.38](technical-details/release-notes/4.tucker/4.3.38.md) - * [4.3.37](technical-details/release-notes/4.tucker/4.3.37.md) - * [4.3.36](technical-details/release-notes/4.tucker/4.3.36.md) - * [4.3.35](technical-details/release-notes/4.tucker/4.3.35.md) - * [4.3.34](technical-details/release-notes/4.tucker/4.3.34.md) - * [4.3.33](technical-details/release-notes/4.tucker/4.3.33.md) - * [4.3.32](technical-details/release-notes/4.tucker/4.3.32.md) - * [4.3.31](technical-details/release-notes/4.tucker/4.3.31.md) - * [4.3.30](technical-details/release-notes/4.tucker/4.3.30.md) - * [4.3.29](technical-details/release-notes/4.tucker/4.3.29.md) - * [4.3.28](technical-details/release-notes/4.tucker/4.3.28.md) - * [4.3.27](technical-details/release-notes/4.tucker/4.3.27.md) - * [4.3.26](technical-details/release-notes/4.tucker/4.3.26.md) - * [4.3.25](technical-details/release-notes/4.tucker/4.3.25.md) - * [4.3.24](technical-details/release-notes/4.tucker/4.3.24.md) - * [4.3.23](technical-details/release-notes/4.tucker/4.3.23.md) - * [4.3.22](technical-details/release-notes/4.tucker/4.3.22.md) - * [4.3.21](technical-details/release-notes/4.tucker/4.3.21.md) - * [4.3.20](technical-details/release-notes/4.tucker/4.3.20.md) - * [4.3.19](technical-details/release-notes/4.tucker/4.3.19.md) - * [4.3.18](technical-details/release-notes/4.tucker/4.3.18.md) - * [4.3.17](technical-details/release-notes/4.tucker/4.3.17.md) - * [4.3.16](technical-details/release-notes/4.tucker/4.3.16.md) - * [4.3.15](technical-details/release-notes/4.tucker/4.3.15.md) - * [4.3.14](technical-details/release-notes/4.tucker/4.3.14.md) - * [4.3.13](technical-details/release-notes/4.tucker/4.3.13.md) - * [4.3.12](technical-details/release-notes/4.tucker/4.3.12.md) - * [4.3.11](technical-details/release-notes/4.tucker/4.3.11.md) - * [4.3.10](technical-details/release-notes/4.tucker/4.3.10.md) - * [4.3.9](technical-details/release-notes/4.tucker/4.3.9.md) - * [4.3.8](technical-details/release-notes/4.tucker/4.3.8.md) - * [4.3.7](technical-details/release-notes/4.tucker/4.3.7.md) - * [4.3.6](technical-details/release-notes/4.tucker/4.3.6.md) - * [4.3.5](technical-details/release-notes/4.tucker/4.3.5.md) - * [4.3.4](technical-details/release-notes/4.tucker/4.3.4.md) - * [4.3.3](technical-details/release-notes/4.tucker/4.3.3.md) - * [4.3.2](technical-details/release-notes/4.tucker/4.3.2.md) - * [4.3.1](technical-details/release-notes/4.tucker/4.3.1.md) - * [4.3.0](technical-details/release-notes/4.tucker/4.3.0.md) - * [4.2.8](technical-details/release-notes/4.tucker/4.2.8.md) - * [4.2.7](technical-details/release-notes/4.tucker/4.2.7.md) - * [4.2.6](technical-details/release-notes/4.tucker/4.2.6.md) - * [4.2.5](technical-details/release-notes/4.tucker/4.2.5.md) - * [4.2.4](technical-details/release-notes/4.tucker/4.2.4.md) - * [4.2.3](technical-details/release-notes/4.tucker/4.2.3.md) - * [4.2.2](technical-details/release-notes/4.tucker/4.2.2.md) - * [4.2.1](technical-details/release-notes/4.tucker/4.2.1.md) - * 
[4.2.0](technical-details/release-notes/4.tucker/4.2.0.md) - * [4.1.2](technical-details/release-notes/4.tucker/4.1.2.md) - * [4.1.1](technical-details/release-notes/4.tucker/4.1.1.md) - * [4.1.0](technical-details/release-notes/4.tucker/4.1.0.md) - * [4.0.7](technical-details/release-notes/4.tucker/4.0.7.md) - * [4.0.6](technical-details/release-notes/4.tucker/4.0.6.md) - * [4.0.5](technical-details/release-notes/4.tucker/4.0.5.md) - * [4.0.4](technical-details/release-notes/4.tucker/4.0.4.md) - * [4.0.3](technical-details/release-notes/4.tucker/4.0.3.md) - * [4.0.2](technical-details/release-notes/4.tucker/4.0.2.md) - * [4.0.1](technical-details/release-notes/4.tucker/4.0.1.md) - * [4.0.0](technical-details/release-notes/4.tucker/4.0.0.md) - * [Tucker](technical-details/release-notes/4.tucker/tucker.md) - * [HarperDB Monkey (Version 3)](technical-details/release-notes/3.monkey/README.md) - * [3.3.0](technical-details/release-notes/3.monkey/3.3.0.md) - * [3.2.1](technical-details/release-notes/3.monkey/3.2.1.md) - * [3.2.0](technical-details/release-notes/3.monkey/3.2.0.md) - * [3.1.5](technical-details/release-notes/3.monkey/3.1.5.md) - * [3.1.4](technical-details/release-notes/3.monkey/3.1.4.md) - * [3.1.3](technical-details/release-notes/3.monkey/3.1.3.md) - * [3.1.2](technical-details/release-notes/3.monkey/3.1.2.md) - * [3.1.1](technical-details/release-notes/3.monkey/3.1.1.md) - * [3.1.0](technical-details/release-notes/3.monkey/3.1.0.md) - * [3.0.0](technical-details/release-notes/3.monkey/3.0.0.md) - * [HarperDB Penny (Version 2)](technical-details/release-notes/2.penny/README.md) - * [2.3.1](technical-details/release-notes/2.penny/2.3.1.md) - * [2.3.0](technical-details/release-notes/2.penny/2.3.0.md) - * [2.2.3](technical-details/release-notes/2.penny/2.2.3.md) - * [2.2.2](technical-details/release-notes/2.penny/2.2.2.md) - * [2.2.0](technical-details/release-notes/2.penny/2.2.0.md) - * [2.1.1](technical-details/release-notes/2.penny/2.1.1.md) - * [HarperDB Alby (Version 1)](technical-details/release-notes/1.alby/README.md) - * [1.3.1](technical-details/release-notes/1.alby/1.3.1.md) - * [1.3.0](technical-details/release-notes/1.alby/1.3.0.md) - * [1.2.0](technical-details/release-notes/1.alby/1.2.0.md) - * [1.1.0](technical-details/release-notes/1.alby/1.1.0.md) - -## More Help - -* [Support](https://harperdbhelp.zendesk.com/hc/en-us/requests/new) -* [Slack](https://harperdbcommunity.slack.com/join/shared_invite/zt-e8w6u1pu-2UFAXl_f4ZHo7F7DVkHIDA#/) -* [Contact Us](https://www.harpersystems.dev/contact) diff --git a/docs/administration/_category_.json b/docs/administration/_category_.json new file mode 100644 index 00000000..828e0998 --- /dev/null +++ b/docs/administration/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Administration", + "position": 2, + "link": { + "type": "generated-index", + "title": "Administration Documentation", + "description": "Guides for managing and administering HarperDB instances", + "keywords": [ + "administration" + ] + } +} \ No newline at end of file diff --git a/docs/administration/administration.md b/docs/administration/administration.md index 2931b09a..741c24f5 100644 --- a/docs/administration/administration.md +++ b/docs/administration/administration.md @@ -1,3 +1,7 @@ +--- +title: Best Practices and Recommendations +--- + # Best Practices and Recommendations Harper is designed for minimal administrative effort, and with managed services these are handled for you. 
But there are important things to consider for managing your own Harper servers. @@ -7,22 +11,22 @@ Harper is designed for minimal administrative effort, and with managed services As a distributed database, data protection and recovery can benefit from different data protection strategies than a traditional single-server database. But multiple aspects of data protection and recovery should be considered: - Availability: As a distributed database Harper is intrinsically built for high-availability and a cluster will continue to run even with complete server(s) failure. This is the first and primary defense for protecting against any downtime or data loss. Harper provides fast horizontal scaling functionality with node cloning, which facilitates ease of establishing high availability clusters. -- [Audit log](logging/audit-logging.md): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. -- Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables.md#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss). +- [Audit log](logging/audit-logging): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. +- Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss). ### Horizontal Scaling with Node Cloning -Harper provides rapid horizontal scaling capabilities through [node cloning functionality described here](cloning.md). +Harper provides rapid horizontal scaling capabilities through [node cloning functionality described here](cloning). 
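Reviewer aside: the snapshots bullet above references the `get_backup` operation. A hedged sketch of what requesting a snapshot over HTTP could look like; the host, port, credentials, database name, and output filename are placeholders, so verify the exact request shape against the linked Operations API page:

```bash
# Hedged sketch: request a database snapshot via the Operations API.
# node-1.my-domain.com, admin:password, and the "dev" database are placeholders.
curl -X POST https://node-1.my-domain.com:9925 \
  -u "admin:password" \
  -H "Content-Type: application/json" \
  -d '{"operation": "get_backup", "database": "dev"}' \
  --output dev-snapshot.backup
```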
### Monitoring

Harper provides robust capabilities for analytics and observability to facilitate effective and informative monitoring:

-- Analytics provides statistics on usage, request counts, load, memory usage with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics.md).
-- A large variety of real-time statistics about load, system information, database metrics, thread usage can be retrieved through the [`system_information` API](../developers/operations-api/system-operations.md).
-- Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering.md).
-- Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy visualize and monitor Harper with Graphana.
+- Analytics provides statistics on usage, request counts, load, memory usage with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics).
+- A large variety of real-time statistics about load, system information, database metrics, thread usage can be retrieved through the [`system_information` API](../developers/operations-api/system-operations).
+- Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering).
+- Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy to visualize and monitor Harper with Grafana.

### Replication Transaction Logging

-Harper utilizes NATS for replication, which maintains a transaction log. See the [transaction log documentation for information on how to query this log](logging/transaction-logging.md).
+Harper utilizes NATS for replication, which maintains a transaction log. See the [transaction log documentation for information on how to query this log](logging/transaction-logging).
diff --git a/docs/administration/cloning.md b/docs/administration/cloning.md
index 789f1eea..dcea866a 100644
--- a/docs/administration/cloning.md
+++ b/docs/administration/cloning.md
@@ -1,3 +1,7 @@
+---
+title: Clone Node
+---
+
 # Clone Node

 Clone node is a configurable node script that, when pointed to another instance of Harper, will create a clone of that
@@ -22,7 +26,7 @@ To start clone run `harperdb` in the CLI with either of the following variables
 For example:

 ```
-HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb
+HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb
 ```

 #### Command line variables

@@ -35,7 +39,7 @@ HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-1.my-
 For example:

 ```
-harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --REPLICATION_HOSTNAME node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ...
+harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --REPLICATION_HOSTNAME node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ...
 ```

 Each time clone is run it will set a value `cloned: true` in `harperdb-config.yaml`.
 This value will prevent clone from
@@ -46,7 +50,7 @@ Clone node does not require any additional configuration apart from the variables
 However, if you wish to set any configuration during clone this can be done by passing the config as environment/CLI variables or cloning overtop of an existing `harperdb-config.yaml` file.

-More can be found in the Harper config documentation [here](../deployments/configuration.md).
+More can be found in the Harper config documentation [here](../deployments/configuration).

 ### Excluding database and components

@@ -121,12 +125,12 @@ leader and the clone.
 Clone node will execute the following steps when run:

 1. Look for an existing Harper install. It does this by using the default (or user provided) `ROOTPATH`.
-2. If an existing instance is found it will check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true` clone will skip the clone logic and start Harper.
-3. Clone harperdb-config.yaml values that don't already exist (excluding values unique to the leader node).
-4. Fully clone any databases that don't already exist.
-5. If classed as a "fresh clone", install Harper. An instance is classed as a fresh clone if there is no system database.
-6. If `REPLICATION_HOSTNAME` is set, set up replication between the leader and clone.
-7. Clone is complete, start Harper.
+1. If an existing instance is found it will check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true` clone will skip the clone logic and start Harper.
+1. Clone harperdb-config.yaml values that don't already exist (excluding values unique to the leader node).
+1. Fully clone any databases that don't already exist.
+1. If classed as a "fresh clone", install Harper. An instance is classed as a fresh clone if there is no system database.
+1. If `REPLICATION_HOSTNAME` is set, set up replication between the leader and clone.
+1. Clone is complete, start Harper.

 ### Cloning with Docker

@@ -139,7 +143,7 @@ docker run -d \
   -v :/home/harperdb/hdb \
   -e HDB_LEADER_PASSWORD=password \
   -e HDB_LEADER_USERNAME=admin \
-  -e HDB_LEADER_URL=https://1.123.45.6:9925 \
+  -e HDB_LEADER_URL=https://1.123.45.6:9925 \
   -e REPLICATION_HOSTNAME=1.123.45.6 \
   -p 9925:9925 \
   -p 9926:9926 \
diff --git a/docs/administration/compact.md b/docs/administration/compact.md
index aeaa81f1..1a71db14 100644
--- a/docs/administration/compact.md
+++ b/docs/administration/compact.md
@@ -1,3 +1,7 @@
+---
+title: Compact
+---
+
 # Compact

 Database files can grow quickly as you use them, sometimes impeding performance. Harper has multiple compact features that can be used to reduce database file size and potentially improve performance. The compact process does not compress your data; instead, it makes your database file smaller by eliminating free-space and fragmentation.

@@ -6,7 +10,7 @@ There are two options that Harper offers for compacting a Database.

 _Note: Some of the storage configuration (such as compression) cannot be updated on existing databases; this is where the following options are useful.
 They will create a new compressed copy of the database with any updated configuration._

-More information on the storage configuration options can be [found here](../deployments/configuration.md#storage)
+More information on the storage configuration options can be [found here](../deployments/configuration#storage)

 ### Copy compaction

@@ -14,7 +18,7 @@ It is recommended that, to prevent any record loss, Harper is not running when p
 This will copy a Harper database with compaction. If you wish to use this new database in place of the original, you will need to move/rename it to the path of the original database.

-This command should be run in the [CLI](../deployments/harper-cli.md)
+This command should be run in the [CLI](../deployments/harper-cli)

 ```bash
 harperdb copy-db
diff --git a/docs/administration/harper-studio/README.md b/docs/administration/harper-studio/README.md
deleted file mode 100644
index de82e7e3..00000000
--- a/docs/administration/harper-studio/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Harper Studio
-
-Harper Studio is the web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface without any knowledge of the underlying Harper API. It’s free to sign up, get started today!
-
-[Sign up for free!](https://studio.harperdb.io/sign-up)
-
-Harper now includes a simplified local Studio that is packaged with all Harper installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration.md#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io).
-
----
-
-## How does Studio Work?
-
-While Harper Studio is web based and hosted by us, all database interactions are performed on the Harper instance the studio is connected to. The Harper Studio loads in your browser, at which point you login to your Harper instances. Credentials are stored in your browser cache and are not transmitted back to Harper. All database interactions are made via the Harper Operations API directly from your browser to your instance.
-
-## What type of instances can I manage?
-
-Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI. All Harper instances feature identical behavior whether they are hosted by us or by you.
diff --git a/docs/administration/harper-studio/create-account.md b/docs/administration/harper-studio/create-account.md
index 8230b777..73eb9d97 100644
--- a/docs/administration/harper-studio/create-account.md
+++ b/docs/administration/harper-studio/create-account.md
@@ -1,6 +1,10 @@
+---
+title: Create a Studio Account
+---
+
 # Create a Studio Account

-Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up).
+Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up).

 1. Provide the following information:
    - First Name
@@ -8,15 +12,15 @@ Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up).
    - Email Address
    - Subdomain

-     _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._
+     _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._

    - Coupon Code (optional)

-2. Review the Privacy Policy and Terms of Service.
-3. Click the sign up for free button.
-4. You will be taken to a new screen to add an account password. Enter your password.
+1. Review the Privacy Policy and Terms of Service.
+1. Click the sign up for free button.
+1. You will be taken to a new screen to add an account password. Enter your password.

    _Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character._

-5. Click the add account password button.
+1. Click the add account password button.

 You will receive a Studio welcome email confirming your registration.
diff --git a/docs/administration/harper-studio/enable-mixed-content.md b/docs/administration/harper-studio/enable-mixed-content.md
index 855df002..40d9877d 100644
--- a/docs/administration/harper-studio/enable-mixed-content.md
+++ b/docs/administration/harper-studio/enable-mixed-content.md
@@ -1,5 +1,9 @@
+---
+title: Enable Mixed Content
+---
+
 # Enable Mixed Content

 Enabling mixed content is required in cases where you would like to connect the Harper Studio to Harper Instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio.

-A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html).
+A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html).
diff --git a/docs/administration/harper-studio/index.md b/docs/administration/harper-studio/index.md
new file mode 100644
index 00000000..011c5923
--- /dev/null
+++ b/docs/administration/harper-studio/index.md
@@ -0,0 +1,21 @@
+---
+title: Harper Studio
+---
+
+# Harper Studio
+
+Harper Studio is the web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface without any knowledge of the underlying Harper API. It’s free to sign up; get started today!
+
+[Sign up for free!](https://studio.harperdb.io/sign-up)
+
+Harper now includes a simplified local Studio that is packaged with all Harper installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io).
+
+---
+
+## How does Studio Work?
+
+While Harper Studio is web based and hosted by us, all database interactions are performed on the Harper instance the Studio is connected to. The Harper Studio loads in your browser, at which point you log in to your Harper instances. Credentials are stored in your browser cache and are not transmitted back to Harper. All database interactions are made via the Harper Operations API directly from your browser to your instance.
+
+## What type of instances can I manage?
+
+Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI. All Harper instances feature identical behavior whether they are hosted by us or by you.
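Reviewer aside on the local Studio mentioned in the new index page above: enabling it is a one-line configuration change. A hedged sketch of the relevant `harperdb-config.yaml` stanza, with the key name assumed from the configuration docs' `#localstudio` anchor linked above:

```yaml
# Hedged sketch of harperdb-config.yaml; the localStudio key is assumed
# from the configuration documentation's #localstudio section.
localStudio:
  enabled: true # serve the bundled local Studio directly from this instance
```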
diff --git a/docs/administration/harper-studio/instance-configuration.md b/docs/administration/harper-studio/instance-configuration.md
index 84963e35..1c3dd2d5 100644
--- a/docs/administration/harper-studio/instance-configuration.md
+++ b/docs/administration/harper-studio/instance-configuration.md
@@ -1,16 +1,20 @@
+---
+title: Instance Configuration
+---
+
 # Instance Configuration

 Harper instance configuration can be viewed and managed directly through the Harper Studio. Harper Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. Enterprise instances can have their licenses modified by modifying licensed RAM.

 All instance configuration is handled through the **config** page of the Harper Studio, accessed with the following instructions:

-1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.
+1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.

-2. Click the appropriate organization that the instance belongs to.
+1. Click the appropriate organization that the instance belongs to.

-3. Select your desired instance.
+1. Select your desired instance.

-4. Click config in the instance control bar.
+1. Click config in the instance control bar.

 _Note, the **config** page will only be available to super users and certain items are restricted to Studio organization owners._

@@ -54,11 +58,11 @@ Note: For Harper Cloud instances, upgrading RAM may add additional CPUs to your
    - If you do have a credit card associated, you will be presented with the updated billing information.
    - Click **Upgrade**.

-2. The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
+1. The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.

-3. Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.
+1. Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.

-_Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._
+_Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._

 ## Update Instance Storage

@@ -72,10 +76,10 @@ Note: Instance storage can only be upgraded once every 6 hours.
    - If you do have a credit card associated, you will be presented with the updated billing information.
    - Click **Upgrade**.

-2. The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
-3. Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.
+1. The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
+1. Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.

-_Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._
+_Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._

 ## Remove Instance

@@ -86,7 +90,7 @@ The Harper instance can be deleted/removed from the Studio with the following in
    - The Studio will present you with a warning.
    - Click **Remove**.

-2. The instance will begin deleting immediately.
+1. The instance will begin deleting immediately.

 ## Restart Instance

@@ -97,8 +101,8 @@ The Harper Cloud instance can be restarted with the following instructions.
    - The Studio will present you with a warning.
    - Click **Restart**.

-2. The instance will begin restarting immediately.
+1. The instance will begin restarting immediately.

 ## Instance Config (Read Only)

-A JSON preview of the instance config is available for reference at the bottom of the page. This is a read only visual and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration.md#using-the-configuration-file-and-naming-conventions).
+A JSON preview of the instance config is available for reference at the bottom of the page. This is a read-only visual and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration#using-the-configuration-file-and-naming-conventions).
diff --git a/docs/administration/harper-studio/instance-metrics.md b/docs/administration/harper-studio/instance-metrics.md
index 39cde942..eae954f1 100644
--- a/docs/administration/harper-studio/instance-metrics.md
+++ b/docs/administration/harper-studio/instance-metrics.md
@@ -1,12 +1,16 @@
+---
+title: Instance Metrics
+---
+
 # Instance Metrics

 The Harper Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions:

-1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.
-2. Click the appropriate organization that the instance belongs to.
-3. Select your desired instance.
-4. Click **status** in the instance control bar.
+1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.
+1. Click the appropriate organization that the instance belongs to.
+1. Select your desired instance.
+1. Click **status** in the instance control bar.

-Once on the instance browse page you can view host system information, [Harper logs](../logging/logging.md), and [Harper Cloud alarms](../../deployments/harper-cloud/alarms.md) (if it is a cloud instance).
+Once on the instance browse page you can view host system information, [Harper logs](../logging/standard-logging), and Harper Cloud alarms (if it is a cloud instance).
_Note, the **status** page will only be available to super users._ diff --git a/docs/administration/harper-studio/instances.md b/docs/administration/harper-studio/instances.md index 6550e718..f6adc86a 100644 --- a/docs/administration/harper-studio/instances.md +++ b/docs/administration/harper-studio/instances.md @@ -1,99 +1,103 @@ +--- +title: Instances +--- + # Instances The Harper Studio allows you to administer all of your HarperDinstances in one place. Harper currently offers the following instance types: - **Harper Cloud Instance** Managed installations of Harper, what we call [Harper Cloud](../../deployments/harper-cloud/). -- **5G Wavelength Instance** Managed installations of Harper running on the Verizon network through AWS Wavelength, what we call [5G Wavelength Instances](../../deployments/harper-cloud/verizon-5g-wavelength-instances.md). _Note, these instances are only accessible via the Verizon network._ +- **5G Wavelength Instance** Managed installations of Harper running on the Verizon network through AWS Wavelength, what we call 5G Wavelength Instances. _Note, these instances are only accessible via the Verizon network._ - **Enterprise Instance** Any Harper installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally. All interactions between the Studio and your instances take place directly from your browser. Harper stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the Harper instances using the standard [Harper API](../../developers/operations-api/). ## Organization Instance List -A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets their own card. Harper Cloud and Enterprise instances are listed together. +A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [Harper Studio Organizations](https:/studio.harperdb.io/organizations) page. Each instance gets their own card. Harper Cloud and Enterprise instances are listed together. ## Create a New Instance -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization for the instance to be created under. -3. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. -4. Select your desired Instance Type. -5. For a Harper Cloud Instance or a Harper 5G Wavelength Instance, click **Create Harper Cloud Instance**. +1. Navigate to the [Harper Studio Organizations](https:/studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select your desired Instance Type. +1. For a Harper Cloud Instance or a Harper 5G Wavelength Instance, click **Create Harper Cloud Instance**. 1. Fill out Instance Info. 1. Enter Instance Name - _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ + _This will be used to build your instance URL. 
For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ - 2. Enter Instance Username + 1. Enter Instance Username _This is the username of the initial Harper instance super user._ - 3. Enter Instance Password + 1. Enter Instance Password _This is the password of the initial Harper instance super user._ - 2. Click **Instance Details** to move to the next page. - 3. Select Instance Specs + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs 1. Select Instance RAM - _Harper Cloud Instances are billed based on Instance RAM, this will select the size of your provisioned instance._ [_More on instance specs_](../../deployments/harper-cloud/instance-size-hardware-specs.md)_._ + _Harper Cloud Instances are billed based on Instance RAM; this will select the size of your provisioned instance._ _More on instance specs._ - 2. Select Storage Size + 1. Select Storage Size - _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS._ [_More on IOPS Impact on Performance_](../../deployments/harper-cloud/iops-impact.md)_._ + _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS._ _More on IOPS Impact on Performance._ - 3. Select Instance Region + 1. Select Instance Region _The geographic area where your instance will be provisioned._ - 4. Click **Confirm Instance Details** to move to the next page. - 5. Review your Instance Details, if there is an error, use the back button to correct it. - 6. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/), if you agree, click the **I agree** radio button to confirm. - 7. Click **Add Instance**. - 8. Your Harper Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. Your Harper Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. ## Register Enterprise Instance -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization for the instance to be created under. -3. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. -4. Select **Register Enterprise Instance**. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select **Register Enterprise Instance**. 1. Fill out Instance Info. 1. Enter Instance Name _This is used for descriptive purposes only._ - 2. Enter Instance Username + 1.
Enter Instance Username _The username of a Harper super user that is already configured in your Harper installation._ - 3. Enter Instance Password + 1. Enter Instance Password _The password of a Harper super user that is already configured in your Harper installation._ - 4. Enter Host + 1. Enter Host _The host to access the Harper instance. For example, `harperdb.myhost.com` or `localhost`._ - 5. Enter Port + 1. Enter Port _The port to access the Harper instance. Harper defaults to `9925` for HTTP and `31283` for HTTPS._ - 6. Select SSL + 1. Select SSL _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._ - 2. Click **Instance Details** to move to the next page. - 3. Select Instance Specs + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs 1. Select Instance RAM _Harper instances are billed based on Instance RAM. Selecting additional RAM enables faster and more complex queries._ - 4. Click **Confirm Instance Details** to move to the next page. - 5. Review your Instance Details, if there is an error, use the back button to correct it. - 6. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/), if you agree, click the **I agree** radio button to confirm. - 7. Click **Add Instance**. - 8. The Harper Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete. + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. The Harper Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete. ## Delete an Instance @@ -104,39 +108,39 @@ Instance deletion has two different behaviors depending on the instance type. An instance can be deleted as follows: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization that the instance belongs to. -3. Identify the proper instance card and click the trash can icon. -4. Enter the instance name into the text box. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the trash can icon. +1. Enter the instance name into the text box. _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ -5. Click the **Do It** button. +1. Click the **Do It** button. ## Upgrade an Instance -Harper instances can be resized on the [Instance Configuration](instance-configuration.md) page. +Harper instances can be resized on the [Instance Configuration](instance-configuration) page. ## Instance Log In/Log Out The Studio enables users to log in and out with different database users from the instance control panel.
To log out of an instance: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization that the instance belongs to. -3. Identify the proper instance card and click the lock icon. -4. You will immediately be logged out of the instance. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the lock icon. +1. You will immediately be logged out of the instance. To log in to an instance: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization that the instance belongs to. -3. Identify the proper instance card, it will have an unlocked icon and a status reading PLEASE LOG IN, and click the center of the card. -4. Enter the database username. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card (it will have an unlocked icon and a status reading PLEASE LOG IN) and click the center of the card. +1. Enter the database username. _The username of a Harper user that is already configured in your Harper instance._ -5. Enter the database password. +1. Enter the database password. _The password of a Harper user that is already configured in your Harper instance._ -6. Click **Log In**. +1. Click **Log In**. diff --git a/docs/administration/harper-studio/login-password-reset.md b/docs/administration/harper-studio/login-password-reset.md index 01dfa8fa..96f40020 100644 --- a/docs/administration/harper-studio/login-password-reset.md +++ b/docs/administration/harper-studio/login-password-reset.md @@ -1,38 +1,42 @@ +--- +title: Login and Password Reset +--- + # Login and Password Reset ## Log In to Your Harper Studio Account To log into your existing Harper Studio account: -1. Navigate to the [Harper Studio](https://studio.harperdb.io/). -2. Enter your email address. -3. Enter your password. -4. Click **sign in**. +1. Navigate to the [Harper Studio](https://studio.harperdb.io/). +1. Enter your email address. +1. Enter your password. +1. Click **sign in**. ## Reset a Forgotten Password To reset a forgotten password: 1. Navigate to the Harper Studio password reset page. -2. Enter your email address. -3. Click **send password reset email**. -4. If the account exists, you will receive an email with a temporary password. -5. Navigate back to the Harper Studio login page. -6. Enter your email address. -7. Enter your temporary password. -8. Click **sign in**. -9. You will be taken to a new screen to reset your account password. Enter your new password. +1. Enter your email address. +1. Click **send password reset email**. +1. If the account exists, you will receive an email with a temporary password. +1. Navigate back to the Harper Studio login page. +1. Enter your email address. +1. Enter your temporary password. +1. Click **sign in**. +1. You will be taken to a new screen to reset your account password. Enter your new password. _Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character (a quick shell check of this policy is sketched below)._ -10. Click the **add account password** button. +1. Click the **add account password** button.
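As an illustrative aside, the stated password policy can be checked mechanically. Below is a minimal shell sketch; it is purely hypothetical, separate from the Studio's own validation, and the sample password is a placeholder.

```bash
# Check a candidate password against the stated policy:
# 8+ characters, at least 1 lower case, 1 upper case, 1 number, 1 special character
pw='Example#Pass1'  # placeholder value
if [[ ${#pw} -ge 8 && $pw =~ [a-z] && $pw =~ [A-Z] && $pw =~ [0-9] && $pw =~ [^a-zA-Z0-9] ]]; then
  echo "meets the stated policy"
else
  echo "does not meet the stated policy"
fi
```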
## Change Your Password If you are already logged into the Studio, you can change your password through the user interface. 1. Navigate to the Harper Studio profile page. -2. In the **password** section, enter: +1. In the **password** section, enter: - Current password. - New password. - New password again _(for verification)_. -3. Click the **Update Password** button. +1. Click the **Update Password** button. diff --git a/docs/administration/harper-studio/manage-applications.md b/docs/administration/harper-studio/manage-applications.md index a732aa88..253f2ba5 100644 --- a/docs/administration/harper-studio/manage-applications.md +++ b/docs/administration/harper-studio/manage-applications.md @@ -1,3 +1,7 @@ +--- +title: Manage Applications +--- + # Manage Applications [Harper Applications](../../developers/applications/) are enabled by default and can be configured further through the Harper Studio. It is recommended to read through the [Applications](../../developers/applications/) documentation first to gain a strong understanding of Harper Applications behavior. @@ -5,9 +9,9 @@ All Applications configuration and development is handled through the **applications** page of the Harper Studio, accessed with the following instructions: 1. Navigate to the Harper Studio Organizations page. -2. Click the appropriate organization that the instance belongs to. -3. Select your desired instance. -4. Click **applications** in the instance control bar. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **applications** in the instance control bar. _Note, the **applications** page will only be available to super users._ @@ -23,33 +27,33 @@ The right side of the screen is the file editor. Here you can make edits to individual files. To learn more about developing Harper Applications, make sure to read through the [Applications](../../developers/applications/) documentation. -When working with Applications in the Harper Studio, by default the editor will restart the Harper Applications server every time a file is saved. Note, this behavior can be turned off by toggling the `auto` toggle at the top right of the applications page. If you are constantly editing your application, it may result in errors causing the application not to run. These errors will not be visible on the application page, however they will be available in the Harper logs, which can be found on the [status page](instance-metrics.md). +When working with Applications in the Harper Studio, by default the editor will restart the Harper Applications server every time a file is saved. Note, this behavior can be turned off by toggling the `auto` toggle at the top right of the applications page. If you are constantly editing your application, it may result in errors causing the application not to run. These errors will not be visible on the application page; however, they will be available in the Harper logs, which can be found on the [status page](instance-metrics). The Applications editor stores unsaved changes in cache. This means that occasionally your editor will show a discrepancy from the code that is stored and running on your Harper instance. You can tell that the code in your Studio differs when the "save" and "revert" buttons are active. To revert the cached version in your editor to the version of the file stored on your Harper instance, click the "revert" button.
## Accessing Your Application Endpoints -Accessing your application endpoints varies with which type of endpoint you're creating. All endpoints, regardless of type, will be accessed via the [Harper HTTP port found in the Harper configuration file](../../deployments/configuration.md#http). The default port is `9926`, but you can verify what your instances is set to by navigating to the [instance config page](instance-configuration.md) and examining the read only JSON version of your instance's config file looking specifically for either the `http: port: 9926` or `http: securePort: 9926` configs. If `port` is set, you will access your endpoints via `http` and if `securePort` is set, you will access your endpoints via `https`. +Accessing your application endpoints varies with which type of endpoint you're creating. All endpoints, regardless of type, will be accessed via the [Harper HTTP port found in the Harper configuration file](../../deployments/configuration#http). The default port is `9926`, but you can verify what your instance is set to by navigating to the [instance config page](instance-configuration) and examining the read-only JSON version of your instance's config file, looking specifically for either the `http: port: 9926` or `http: securePort: 9926` configs. If `port` is set, you will access your endpoints via `http`, and if `securePort` is set, you will access your endpoints via `https`. -Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`. +Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`. - **Standard REST Endpoints**\ - Standard REST endpoints are defined via the `@export` directive to tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest.md), we could send a `GET` (or since this is a GET, we could post the URL in our browser) to `https://localhost:9926/Dog/1`. + Standard REST endpoints are defined via the `@export` directive to tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest), we could send a `GET` (or, since this is a GET, simply paste the URL into our browser) to `https://localhost:9926/Dog/1`. - **Augmented REST Endpoints**\ - Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`.
+ Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`. - **Fastify Routes**\ - If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes.md). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost/application-template/getAll`. + If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost:9926/application-template/getAll`. Example requests for each endpoint type are sketched at the end of this page. ## Creating a New Application 1. From the application page, click the "+ app" button at the top right. -2. Click "+ Create A New Application Using The Default Template". -3. Enter a name for your project, note project names must contain only alphanumeric characters, dashes and underscores. -4. Click OK. -5. Your project will be available in the applications file navigator on the left. Click a file to select a file to edit. +1. Click "+ Create A New Application Using The Default Template". +1. Enter a name for your project; note that project names must contain only alphanumeric characters, dashes, and underscores. +1. Click OK. +1. Your project will be available in the applications file navigator on the left. Click a file to open it for editing. ## Editing an Application 1. From the applications page, click the file you would like to edit from the file navigator on the left. -2. Edit the file with any changes you'd like. -3. Click "save" at the top right. Note, as mentioned above, when you save a file, the Harper Applications server will be restarted immediately. +1. Edit the file with any changes you'd like. +1. Click "save" at the top right. Note, as mentioned above, when you save a file, the Harper Applications server will be restarted immediately.
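To sanity-check these endpoints from a terminal, here is a minimal `curl` sketch against the locally hosted instance described above (`securePort` 9926). The `HDB_ADMIN:password` credentials are placeholders for your own super user, `-k` skips TLS verification for a self-signed local certificate, and the Fastify route assumes the `application-template` project named above.

```bash
# Standard REST endpoint: record with ID 1 from the Dog table
curl -k -u HDB_ADMIN:password https://localhost:9926/Dog/1

# Augmented REST endpoint: custom resource defined in JavaScript
curl -k -u HDB_ADMIN:password https://localhost:9926/DogWithHumanAge/1

# Fastify route: [Instance URL]:[HTTP Port]/[Project Name]/[Route URL]
curl -k -u HDB_ADMIN:password https://localhost:9926/application-template/getAll
```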
diff --git a/docs/administration/harper-studio/manage-databases-browse-data.md b/docs/administration/harper-studio/manage-databases-browse-data.md index 08641097..33482198 100644 --- a/docs/administration/harper-studio/manage-databases-browse-data.md +++ b/docs/administration/harper-studio/manage-databases-browse-data.md @@ -1,11 +1,15 @@ +--- +title: Manage Databases / Browse Data +--- + # Manage Databases / Browse Data Manage instance databases/tables and browse data in tabular format with the following instructions: 1. Navigate to the Harper Studio Organizations page. -2. Click the appropriate organization that the instance belongs to. -3. Select your desired instance. -4. Click **browse** in the instance control bar. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **browse** in the instance control bar. Once on the instance browse page you can view data, manage databases and tables, add new data, and more. @@ -14,36 +18,36 @@ Once on the instance browse page you can view data, manage databases and tables, #### Create a Database 1. Click the plus icon at the top right of the databases section. -2. Enter the database name. -3. Click the green check mark. +1. Enter the database name. +1. Click the green check mark. #### Delete a Database Deleting a database is permanent and irreversible. Deleting a database removes all tables and data within it. 1. Click the minus icon at the top right of the databases section. -2. Identify the appropriate database to delete and click the red minus sign in the same row. -3. Click the red check mark to confirm deletion. +1. Identify the appropriate database to delete and click the red minus sign in the same row. +1. Click the red check mark to confirm deletion. #### Create a Table 1. Select the desired database from the databases section. -2. Click the plus icon at the top right of the tables section. -3. Enter the table name. -4. Enter the primary key. +1. Click the plus icon at the top right of the tables section. +1. Enter the table name. +1. Enter the primary key. _The primary key is also often referred to as the hash attribute in the studio, and it defines the unique identifier for each row in your table._ -5. Click the green check mark. +1. Click the green check mark. #### Delete a Table Deleting a table is permanent and irreversible. Deleting a table removes all data within it. 1. Select the desired database from the databases section. -2. Click the minus icon at the top right of the tables section. -3. Identify the appropriate table to delete and click the red minus sign in the same row. -4. Click the red check mark to confirm deletion. +1. Click the minus icon at the top right of the tables section. +1. Identify the appropriate table to delete and click the red minus sign in the same row. +1. Click the red check mark to confirm deletion. ## Manage Table Data @@ -52,51 +56,51 @@ The following section assumes you have selected the appropriate table from the d #### Filter Table Data 1. Click the magnifying glass icon at the top right of the table browser. -2. This expands the search filters. -3. The results will be filtered appropriately. +1. This expands the search filters. +1. The results will be filtered appropriately. #### Load CSV Data 1. Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. -2. To import a CSV by URL: +1. To import a CSV by URL: 1. 
Enter the URL in the **CSV file URL** textbox. - 2. Click **Import From URL**. - 3. The CSV will load, and you will be redirected back to browse table data. -3. To upload a CSV file: + 1. Click **Import From URL**. + 1. The CSV will load, and you will be redirected back to browse table data. +1. To upload a CSV file: 1. Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). - 2. Navigate to your desired CSV file and select it. - 3. Click **Insert X Records**, where X is the number of records in your CSV. - 4. The CSV will load, and you will be redirected back to browse table data. + 1. Navigate to your desired CSV file and select it. + 1. Click **Insert X Records**, where X is the number of records in your CSV. + 1. The CSV will load, and you will be redirected back to browse table data. #### Add a Record 1. Click the plus icon at the top right of the table browser. -2. The Studio will pre-populate existing table attributes in JSON format. +1. The Studio will pre-populate existing table attributes in JSON format. _The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction._ -3. Enter values to be added to the record. +1. Enter values to be added to the record. _You may add new attributes to the JSON; they will be automatically added to the table._ -4. Click the **Add New** button. +1. Click the **Add New** button. #### Edit a Record 1. Click the record/row you would like to edit. -2. Modify the desired values. +1. Modify the desired values. _You may add new attributes to the JSON; they will be automatically added to the table._ -3. Click the **save icon**. +1. Click the **save icon**. #### Delete a Record Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted. 1. Click the record/row you would like to delete. -2. Click the **delete icon**. -3. Confirm deletion by clicking the **check icon**. +1. Click the **delete icon**. +1. Confirm deletion by clicking the **check icon**. ## Browse Table Data diff --git a/docs/administration/harper-studio/manage-instance-roles.md b/docs/administration/harper-studio/manage-instance-roles.md index 552d5eb5..3662013c 100644 --- a/docs/administration/harper-studio/manage-instance-roles.md +++ b/docs/administration/harper-studio/manage-instance-roles.md @@ -1,16 +1,20 @@ +--- +title: Manage Instance Roles +--- + # Manage Instance Roles -Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles.md) to gain a strong understanding of how they operate. +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. Instance role configuration is handled through the **roles** page of the Harper Studio, accessed with the following instructions: 1. Navigate to the Harper Studio Organizations page. -2. Click the appropriate organization that the instance belongs to. +1. Click the appropriate organization that the instance belongs to. -3. Select your desired instance. +1. Select your desired instance. -4. Click **roles** in the instance control bar. +1.
Click **roles** in the instance control bar. _Note, the **roles** page will only be available to super users._ @@ -40,27 +44,27 @@ _Note, when new tables are added that are not configured, the Studio will genera 1. Click the plus icon at the top right of the appropriate role section. -2. Enter the role name. +1. Enter the role name. -3. Click the green check mark. +1. Click the green check mark. -4. Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. +1. Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. -5. Configure the role permissions in the role permission editing panel. +1. Configure the role permissions in the role permission editing panel. _Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel._ -6. Click **Update Role Permissions**. +1. Click **Update Role Permissions**. #### Modify a Role 1. Click the appropriate role from the appropriate role section. -2. Modify the role permissions in the role permission editing panel. +1. Modify the role permissions in the role permission editing panel. _Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel._ -3. Click **Update Role Permissions**. +1. Click **Update Role Permissions**. #### Delete a Role @@ -68,6 +72,6 @@ Deleting a role is permanent and irreversible. A role cannot be removed if users are assigned to it. 1. Click the minus icon at the top right of the roles section. -2. Identify the appropriate role to delete and click the red minus sign in the same row. +1. Identify the appropriate role to delete and click the red minus sign in the same row. -3. Click the red check mark to confirm deletion. +1. Click the red check mark to confirm deletion. diff --git a/docs/administration/harper-studio/manage-instance-users.md b/docs/administration/harper-studio/manage-instance-users.md index 0b2ab43b..c52f005d 100644 --- a/docs/administration/harper-studio/manage-instance-users.md +++ b/docs/administration/harper-studio/manage-instance-users.md @@ -1,16 +1,20 @@ +--- +title: Manage Instance Users +--- + # Manage Instance Users -Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles.md) to gain a strong understanding of how they operate. +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. Instance user configuration is handled through the **users** page of the Harper Studio, accessed with the following instructions: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization that the instance belongs to. +1. Click the appropriate organization that the instance belongs to. -3. Select your desired instance. +1. Select your desired instance. -4. Click **users** in the instance control bar. +1. Click **users** in the instance control bar. _Note, the **users** page will only be available to super users._ @@ -23,9 +27,9 @@ Harper instance users can be added with the following instructions. - New user password.
- Select a role. - _Learn more about role management here: [Manage Instance Roles](manage-instance-roles.md)._ + _Learn more about role management here: [Manage Instance Roles](manage-instance-roles)._ -2. Click **Add User**. +1. Click **Add User**. ## Edit a User @@ -33,17 +37,17 @@ Harper instance users can be modified with the following instructions. 1. In the **existing users** panel, click the row of the user you would like to edit. -2. To change a user’s password: +1. To change a user’s password: 1. In the **Change user password** section, enter the new password. - 2. Click **Update Password**. + 1. Click **Update Password**. -3. To change a user’s role: +1. To change a user’s role: 1. In the **Change user role** section, select the new role. - 2. Click **Update Role**. + 1. Click **Update Role**. -4. To delete a user: +1. To delete a user: 1. In the **Delete User** section, type the username into the textbox. _This is done for confirmation purposes._ - 2. Click **Delete User**. + 1. Click **Delete User**. diff --git a/docs/administration/harper-studio/manage-replication.md b/docs/administration/harper-studio/manage-replication.md index c99cded6..8987d71c 100644 --- a/docs/administration/harper-studio/manage-replication.md +++ b/docs/administration/harper-studio/manage-replication.md @@ -1,16 +1,20 @@ +--- +title: Manage Replication +--- + # Manage Replication -Harper instance clustering and replication can be configured directly through the Harper Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/README.md) first to gain a strong understanding of Harper clustering behavior. +Harper instance clustering and replication can be configured directly through the Harper Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/) first to gain a strong understanding of Harper clustering behavior. All clustering configuration is handled through the **replication** page of the Harper Studio, accessed with the following instructions: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2. Click the appropriate organization that the instance belongs to. +1. Click the appropriate organization that the instance belongs to. -3. Select your desired instance. +1. Select your desired instance. -4. Click **replication** in the instance control bar. +1. Click **replication** in the instance control bar. Note, the **replication** page will only be available to super users. @@ -18,12 +22,12 @@ Note, the **replication** page will only be available to super users. ## Initial Configuration -Harper instances do not have clustering configured by default. The Harper Studio will walk you through the initial configuration. Upon entering the **replication** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user.md) document before proceeding. +Harper instances do not have clustering configured by default. The Harper Studio will walk you through the initial configuration. Upon entering the **replication** screen for the first time, you will need to complete the following configuration.
Configurations are set in the **enable clustering** panel on the left, while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user) document before proceeding. 1. Enter Cluster User username. (Defaults to `cluster_user`). -2. Enter Cluster Password. -3. Review and/or Set Cluster Node Name. -4. Click **Enable Clustering**. +1. Enter Cluster Password. +1. Review and/or Set Cluster Node Name. +1. Click **Enable Clustering**. At this point the Studio will restart your Harper Instance, which is required for the configuration changes to take effect. @@ -57,11 +61,11 @@ Harper Instances can be clustered together with the following instructions. 1. Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both. -2. Identify the instance you would like to connect from the **unconnected instances** panel. +1. Identify the instance you would like to connect from the **unconnected instances** panel. -3. Click the plus icon next the appropriate instance. +1. Click the plus icon next to the appropriate instance. -4. If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear, click it to learn more about what could be causing the issue. +1. If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue. --- @@ -71,7 +75,7 @@ Harper Instances can be disconnected with the following instructions. 1. Identify the instance you would like to disconnect from the **connected instances** panel. -2. Click the minus icon next the appropriate instance. +1. Click the minus icon next to the appropriate instance. --- @@ -81,6 +85,6 @@ Subscriptions must be configured in order to move data between connected instances. 1. Identify the instance, database, and table for replication to be configured. -2. For publish, click the toggle switch in the **publish** column. +1. For publish, click the toggle switch in the **publish** column. -3. For subscribe, click the toggle switch in the **subscribe** column. +1. For subscribe, click the toggle switch in the **subscribe** column. diff --git a/docs/administration/harper-studio/organizations.md b/docs/administration/harper-studio/organizations.md index c273090b..e36b6fa6 100644 --- a/docs/administration/harper-studio/organizations.md +++ b/docs/administration/harper-studio/organizations.md @@ -1,3 +1,7 @@ +--- +title: Organizations +--- + # Organizations Harper Studio organizations provide the ability to group Harper Cloud Instances. Organization behavior is as follows: @@ -13,32 +17,32 @@ An organization is automatically created for you when you sign up for Harper Studio. ## List Organizations -A summary view of all organizations your user belongs to can be viewed on the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the Harper Studio. +A summary view of all organizations your user belongs to can be viewed on the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
You can navigate to this page at any time by clicking the **all organizations** link at the top of the Harper Studio. ## Create a New Organization A new organization can be created as follows: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. -2. Click the **Create a New Organization** card. -3. Fill out new organization details +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the **Create a New Organization** card. +1. Fill out new organization details - Enter Organization Name _This is used for descriptive purposes only._ - Enter Organization Subdomain - _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._ -4. Click Create Organization. + _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._ +1. Click Create Organization. ## Delete an Organization An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows: 1. Navigate to the Harper Studio Organizations page. -2. Identify the proper organization card and click the trash can icon. -3. Enter the organization name into the text box. +1. Identify the proper organization card and click the trash can icon. +1. Enter the organization name into the text box. _This is done for confirmation purposes to ensure you do not accidentally delete an organization._ -4. Click the **Do It** button. +1. Click the **Do It** button. ## Manage Users @@ -48,11 +52,11 @@ Harper Studio organization owners can manage users including inviting new users, A new user can be invited to an organization as follows: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. -2. Click the appropriate organization card. -3. Click **users** at the top of the screen. -4. In the **add user** box, enter the new user’s email address. -5. Click **Add User**. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the appropriate organization card. +1. Click **users** at the top of the screen. +1. In the **add user** box, enter the new user’s email address. +1. Click **Add User**. Users may or may not already be Harper Studio users when adding them to an organization. If the Harper Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a Harper Studio account, they will receive an email welcoming them to Harper Studio. @@ -63,10 +67,10 @@ Users may or may not already be Harper Studio users when adding them to an organization. Organization owners have full access to the organization including the ability to manage organization users, create, modify, and delete instances, and delete the organization. Users must have accepted their invitation prior to being promoted to an owner. A user’s organization owner status can be toggled as follows: 1. Navigate to the Harper Studio Organizations page. -2.
Click the appropriate organization card. -3. Click **users** at the top of the screen. -4. Click the appropriate user from the **existing users** section. -5. Toggle the **Is Owner** switch to the desired status. +1. Click the appropriate organization card. +1. Click **users** at the top of the screen. +1. Click the appropriate user from the **existing users** section. +1. Toggle the **Is Owner** switch to the desired status. --- @@ -74,23 +78,23 @@ Organization owners have full access to the organization including the ability to manage organization users, create, modify, and delete instances, and delete the organization. Users may be removed from an organization at any time. Removing a user from an organization will not delete their Harper Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. -2. Click the appropriate organization card. -3. Click **users** at the top of the screen. -4. Click the appropriate user from the **existing users** section. -5. Type **DELETE** in the text box in the **Delete User** row. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the appropriate organization card. +1. Click **users** at the top of the screen. +1. Click the appropriate user from the **existing users** section. +1. Type **DELETE** in the text box in the **Delete User** row. _This is done for confirmation purposes to ensure you do not accidentally delete a user._ -6. Click **Delete User**. +1. Click **Delete User**. ## Manage Billing Billing is configured per organization and will be billed to the stored credit card at appropriate intervals (monthly or annually depending on the registered instance). Billing settings can be configured as follows: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. -2. Click the appropriate organization card. -3. Click **billing** at the top of the screen. +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the appropriate organization card. +1. Click **billing** at the top of the screen. Here organization owners can view invoices, manage coupons, and manage the associated credit card. @@ -101,5 +105,5 @@ _Harper billing and payments are managed via Stripe._ Coupons are applicable towards any paid tier or enterprise instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows: 1. In the coupons panel of the **billing** page, enter your coupon code. -2. Click **Add Coupon**. -3. The coupon will then be available and displayed in the coupons panel. +1. Click **Add Coupon**. +1. The coupon will then be available and displayed in the coupons panel. diff --git a/docs/administration/harper-studio/query-instance-data.md b/docs/administration/harper-studio/query-instance-data.md index 0db8a346..3e36c7cf 100644 --- a/docs/administration/harper-studio/query-instance-data.md +++ b/docs/administration/harper-studio/query-instance-data.md @@ -1,13 +1,17 @@ +--- +title: Query Instance Data +--- + # Query Instance Data SQL queries can be executed directly through the Harper Studio with the following instructions: -1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. -2.
Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **query** in the instance control bar. +1. Enter your SQL query in the SQL query window. +1. Click **Execute**. _Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser._ diff --git a/docs/administration/jobs.md b/docs/administration/jobs.md index a4fd2cbf..c487f424 100644 --- a/docs/administration/jobs.md +++ b/docs/administration/jobs.md @@ -1,3 +1,7 @@ +--- +title: Jobs +--- + # Jobs Harper Jobs are asynchronous tasks performed by the Operations API. @@ -12,19 +16,19 @@ The job status can be **COMPLETE** or **IN_PROGRESS**. Example job operations include: -[csv data load](../developers/operations-api/bulk-operations.md#csv-data-load) +[csv data load](../developers/operations-api/bulk-operations#csv-data-load) -[csv file load](../developers/operations-api/bulk-operations.md#csv-file-load) +[csv file load](../developers/operations-api/bulk-operations#csv-file-load) -[csv url load](../developers/operations-api/bulk-operations.md#csv-url-load) +[csv url load](../developers/operations-api/bulk-operations#csv-url-load) -[import from s3](../developers/operations-api/bulk-operations.md#import-from-s3) +[import from s3](../developers/operations-api/bulk-operations#import-from-s3) -[delete_records_before](../developers/operations-api/bulk-operations.md#delete-records-before) +[delete_records_before](../developers/operations-api/bulk-operations#delete-records-before) -[export_local](../developers/operations-api/bulk-operations.md#export-local) +[export_local](../developers/operations-api/bulk-operations#export-local) -[export_to_s3](../developers/operations-api/bulk-operations.md#export-to-s3) +[export_to_s3](../developers/operations-api/bulk-operations#export-to-s3) Example Response from a Job Operation @@ -38,7 +42,7 @@ Whenever one of these operations is initiated, an asynchronous job is created and the response returns the corresponding job ID. ## Managing Jobs -To check on a job's status, use the [get_job](../developers/operations-api/jobs.md#get-job) operation. +To check on a job's status, use the [get_job](../developers/operations-api/jobs#get-job) operation; a minimal curl sketch of this call appears below. Get Job Request @@ -73,7 +77,7 @@ Get Job Response ## Finding Jobs -To find jobs (if the ID is not known) use the [search_jobs_by_start_date](../developers/operations-api/jobs.md#search-jobs-by-start-date) operation. +To find jobs (if the ID is not known), use the [search_jobs_by_start_date](../developers/operations-api/jobs#search-jobs-by-start-date) operation. Search Jobs Request diff --git a/docs/administration/logging/README.md b/docs/administration/logging/README.md deleted file mode 100644 index 83226b1d..00000000 --- a/docs/administration/logging/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Logging - -Harper provides many different logging options for various features and functionality. - -- [Standard Logging](logging.md): Harper maintains a log of events that take place throughout operation. - [Audit Logging](audit-logging.md): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. -- [Transaction Logging](transaction-logging.md): Harper stores a verbose history of all transactions logged for specified database tables, including original data records.
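Returning to the job workflow above, here is a minimal `curl` sketch of a `get_job` call, assuming a local instance with the operations API on its default HTTP port `9925`; the credentials and the job ID shown are placeholders.

```bash
# Check the status of a job by its ID (credentials and ID are placeholders)
curl -u HDB_ADMIN:password \
  -H 'Content-Type: application/json' \
  -d '{"operation": "get_job", "id": "4a982782-929a-4507-8794-26dae1132def"}' \
  http://localhost:9925
```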
diff --git a/docs/administration/logging/audit-logging.md b/docs/administration/logging/audit-logging.md index 658a31b9..209b4981 100644 --- a/docs/administration/logging/audit-logging.md +++ b/docs/administration/logging/audit-logging.md @@ -1,3 +1,7 @@ +--- +title: Audit Logging +--- + # Audit Logging ### Audit log @@ -10,7 +14,7 @@ Audit log is enabled by default. To disable the audit log, set `logging.auditLog` to `false`. #### read_audit_log -The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples found in the [Harper API documentation](../../developers/operations-api/logs.md). +The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [Harper API documentation](../../developers/operations-api/logs). **Search by Timestamp** diff --git a/docs/administration/logging/index.md b/docs/administration/logging/index.md new file mode 100644 index 00000000..d81017f2 --- /dev/null +++ b/docs/administration/logging/index.md @@ -0,0 +1,11 @@ +--- +title: Logging +--- + +# Logging + +Harper provides many different logging options for various features and functionality. + +- [Standard Logging](standard-logging): Harper maintains a log of events that take place throughout operation. +- [Audit Logging](audit-logging): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +- [Transaction Logging](transaction-logging): Harper stores a verbose history of all transactions logged for specified database tables, including original data records. diff --git a/docs/administration/logging/logging.md b/docs/administration/logging/logging.md deleted file mode 100644 index 2338b761..00000000 --- a/docs/administration/logging/logging.md +++ /dev/null @@ -1,61 +0,0 @@ -# Standard Logging - -Harper maintains a log of events that take place throughout operation. Log messages can be used for diagnostics purposes as well as monitoring. - -All logs (except for the install log) are stored in the main log file in the hdb directory `/log/hdb.log`. The install log is located in the Harper application directory most likely located in your npm directory `npm/harperdb/logs`. - -Each log message has several key components for consistent reporting of events. A log message has a format of: - -``` -<timestamp> [<level>] [<thread/id>] ...[<tag>]: <message> -``` - -For example, a typical log entry looks like: - -``` -2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started. -``` - -The components of a log entry are: - -- timestamp - This is the date/time stamp when the event occurred -- level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels in order of least urgent (and more verbose) are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. -- thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process.
Key threads are: - - main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads - - http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. - - Clustering\* - These are threads and processes that handle replication. - - job - These are job threads that have been started to handle operations that are executed in a separate job thread. -- tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. -- message - This is the main message that was reported. - -We try to keep logging to a minimum by default, to do this the default log level is `error`. If you require more information from the logs, increasing the log level down will provide that. - -The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. - -## Clustering Logging - -Harper clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of Harper and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels from least verbose are `error`, `warn`, `info`, `debug` and `trace`. - -## Log File vs Standard Streams - -Harper logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. To log to standard streams effectively, make sure to directly run `harperdb` and don't start it as a separate process (don't use `harperdb start`) and `logging.stdStreams` must be set to true. Note, logging to standard streams only will disable clustering catchup. - -## Logging Rotation - -Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This will allow for organized storage and efficient use of disk space. For more information see “logging” in our [config docs](../../deployments/configuration.md). - -## Read Logs via the API - -To access specific logs you may query the Harper API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria. - -```json -{ - "operation": "read_log", - "start": 0, - "limit": 1000, - "level": "error", - "from": "2021-01-25T22:05:27.464+0000", - "until": "2021-01-25T23:05:27.464+0000", - "order": "desc" -} -``` diff --git a/docs/administration/logging/standard-logging.md b/docs/administration/logging/standard-logging.md new file mode 100644 index 00000000..56711178 --- /dev/null +++ b/docs/administration/logging/standard-logging.md @@ -0,0 +1,65 @@ +--- +title: Standard Logging +--- + +# Standard Logging + +Harper maintains a log of events that take place throughout operation. Log messages can be used for diagnostics purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory `/log/hdb.log`. 
The install log is located in the Harper application directory, most likely in your npm directory `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: + +``` +<timestamp> [<level>] [<thread/id>] ...[<tag>]: <message> +``` + +For example, a typical log entry looks like: + +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started. +``` + +The components of a log entry are: + +- timestamp - This is the date/time stamp when the event occurred. +- level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels in order of least urgent (and more verbose) are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +- thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are: + - main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads. + - http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + - Clustering\* - These are threads and processes that handle replication. + - job - These are job threads that have been started to handle operations that are executed in a separate job thread. +- tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +- message - This is the main message that was reported. + +We try to keep logging to a minimum by default; to do this, the default log level is `error`. If you require more information from the logs, setting a more verbose log level (such as `debug` or `trace`) will provide that. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +Harper clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of Harper, and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers, there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels from least verbose are `error`, `warn`, `info`, `debug`, and `trace`. + +## Log File vs Standard Streams + +Harper logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. To log to standard streams effectively, make sure to directly run `harperdb` and don't start it as a separate process (don't use `harperdb start`), and `logging.stdStreams` must be set to `true`. Note, logging to standard streams only will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This will allow for organized storage and efficient use of disk space. For more information, see “logging” in our [config docs](../../deployments/configuration). A quick terminal sketch for watching the main log follows below.
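As a practical aside, here is a minimal shell sketch for following the main log from a terminal. It assumes an hdb root of `~/hdb` (adjust to your configured root path); the bracketed level tags come from the log format shown above.

```bash
# Follow the main Harper log and surface only error and fatal entries
tail -f ~/hdb/log/hdb.log | grep -E '\[(error|fatal)\]'
```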
+ +## Read Logs via the API + +To access specific logs you may query the Harper API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria. + +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` diff --git a/docs/administration/logging/transaction-logging.md b/docs/administration/logging/transaction-logging.md index 0a9ae3b8..9003ff04 100644 --- a/docs/administration/logging/transaction-logging.md +++ b/docs/administration/logging/transaction-logging.md @@ -1,3 +1,7 @@ +--- +title: Transaction Logging +--- + # Transaction Logging Harper offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. diff --git a/docs/custom-functions/README.md b/docs/custom-functions/README.md deleted file mode 100644 index 687dcabb..00000000 --- a/docs/custom-functions/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Custom Functions - -Custom functions are a key part of building a complete Harper application. It is highly recommended that you use Custom Functions as the primary mechanism for your application to access your Harper database. Using Custom Functions gives you complete control over the accessible endpoints, how users are authenticated and authorized, what data is accessed from the database, and how it is aggregated and returned to users. - -- Add your own API endpoints to a standalone API server inside Harper - -- Use Harper Core methods to interact with your data at lightning speed - -- Custom Functions are powered by Fastify, so they’re extremely flexible - -- Manage in Harper Studio, or use your own IDE and Version Management System - -- Distribute your Custom Functions to all your Harper instances with a single click - ---- - -- [Requirements and Definitions](requirements-definitions.md) - -- [Create A Project](create-project.md) - -- [Define Routes](define-routes.md) - -- [Define Helpers](define-helpers.md) - -- [Host a Static UI](host-static.md) diff --git a/docs/custom-functions/create-project.md b/docs/custom-functions/create-project.md index 263890be..2e38e450 100644 --- a/docs/custom-functions/create-project.md +++ b/docs/custom-functions/create-project.md @@ -1,10 +1,14 @@ +--- +title: Create an in-place Project +--- + # Create an in-place Project -To create a project using our web-based GUI, Harper Studio, checkout out how to manage Custom Functions [here](../harper-studio/manage-functions.md). +To create a project using our web-based GUI, Harper Studio, check out how to manage Custom Functions [here](../harper-studio/manage-functions). Otherwise, to create a project, you have the following options: -1. **Use the add_custom_function_project operation** +1. **Use the add_custom_function_project operation** This operation creates a new project folder, and populates it with templates for the routes, helpers, and static subfolders. @@ -15,15 +19,15 @@ Otherwise, to create a project, you have the following options: } ``` -2. **Clone our public gitHub project template** +1. **Clone our public GitHub project template** _This requires a local installation.
Remove the .git directory for a clean slate of git history._ ```bash -> git clone https://github.com/HarperDB/harperdb-custom-functions-template.git ~/hdb/custom_functions/dogs +> git clone https://github.com/HarperDB/harperdb-custom-functions-template.git ~/hdb/custom_functions/dogs ``` -3. **Create a project folder in your Custom Functions root directory** and **initialize** +1. **Create a project folder in your Custom Functions root directory** and **initialize** _This requires a local installation._ diff --git a/docs/custom-functions/define-helpers.md b/docs/custom-functions/define-helpers.md index 5e7801d7..84e23c0d 100644 --- a/docs/custom-functions/define-helpers.md +++ b/docs/custom-functions/define-helpers.md @@ -1,3 +1,7 @@ +--- +title: Define Helpers +--- + # Define Helpers Helpers are functions for use within your routes. You may want to use the same helper in multiple route files, so this allows you to write it once, and include it wherever you need it. diff --git a/docs/custom-functions/host-static.md b/docs/custom-functions/host-static.md index 0cc9a591..643d2724 100644 --- a/docs/custom-functions/host-static.md +++ b/docs/custom-functions/host-static.md @@ -1,12 +1,16 @@ +--- +title: Host A Static Web UI +--- + # Host A Static Web UI -The [@fastify/static](https://github.com/fastify/fastify-static) module can be utilized to serve static files. +The [@fastify/static](https://github.com/fastify/fastify-static) module can be utilized to serve static files. Install the module in your project by running `npm i @fastify/static` from inside your project directory. Register `@fastify/static` with the server and set `root` to the absolute path of the directory that contains the static files to serve. -For further information on how to send specific files see the [@fastify/static](https://github.com/fastify/fastify-static) docs. +For further information on how to send specific files, see the [@fastify/static](https://github.com/fastify/fastify-static) docs. ```javascript module.exports = async (server, { hdbCore, logger }) => { diff --git a/docs/custom-functions/index.md b/docs/custom-functions/index.md new file mode 100644 index 00000000..a5417b9d --- /dev/null +++ b/docs/custom-functions/index.md @@ -0,0 +1,29 @@ +--- +title: Custom Functions +--- + +# Custom Functions + +Custom functions are a key part of building a complete Harper application. It is highly recommended that you use Custom Functions as the primary mechanism for your application to access your Harper database. Using Custom Functions gives you complete control over the accessible endpoints, how users are authenticated and authorized, what data is accessed from the database, and how it is aggregated and returned to users.
+ +- Add your own API endpoints to a standalone API server inside Harper + +- Use Harper Core methods to interact with your data at lightning speed + +- Custom Functions are powered by Fastify, so they’re extremely flexible + +- Manage in Harper Studio, or use your own IDE and Version Management System + +- Distribute your Custom Functions to all your Harper instances with a single click + +--- + +- [Requirements and Definitions](requirements-definitions) + +- [Create A Project](create-project) + +- [Define Routes](define-routes) + +- [Define Helpers](define-helpers) + +- [Host a Static UI](host-static) diff --git a/docs/custom-functions/requirements-definitions.md b/docs/custom-functions/requirements-definitions.md index cd973906..46d0188a 100644 --- a/docs/custom-functions/requirements-definitions.md +++ b/docs/custom-functions/requirements-definitions.md @@ -1,3 +1,7 @@ +--- +title: Requirements And Definitions +--- + # Requirements And Definitions Before you get started with Custom Functions, here’s a primer on the basic configuration and the structure of a Custom Functions Project. @@ -35,17 +39,17 @@ customFunctions: - **`root`** This is the root directory where your Custom Functions projects and their files will live. By default, it’s in your \<ROOTPATH>, but you can locate it anywhere--in a developer folder next to your other development projects, for example. -_Please visit our [configuration docs](../configuration.md) for a more comprehensive look at these settings._ +_Please visit our [configuration docs](../configuration) for a more comprehensive look at these settings._ ## Project Structure **project folder** -The name of the folder that holds your project files serves as the root prefix for all the routes you create. All routes created in the **dogs** project folder will have a URL like this: **https://my-server-url.com:9926/dogs/my/route**. As such, it’s important that any project folders you create avoid any characters that aren’t URL-friendly. You should avoid URL delimiters in your folder names. +The name of the folder that holds your project files serves as the root prefix for all the routes you create. All routes created in the **dogs** project folder will have a URL like this: **https://my-server-url.com:9926/dogs/my/route**. As such, it’s important that any project folders you create avoid any characters that aren’t URL-friendly. You should avoid URL delimiters in your folder names. **/routes folder** -By default, files in the **routes** folder define the requests that your Custom Functions server will handle. They are [standard Fastify route declarations](https://www.fastify.io/docs/latest/Reference/Routes/), so if you’re familiar with them, you should be up and running in no time. The default components for a route are the url, method, preValidation, and handler. +By default, files in the **routes** folder define the requests that your Custom Functions server will handle. They are [standard Fastify route declarations](https://www.fastify.io/docs/latest/Reference/Routes/), so if you’re familiar with them, you should be up and running in no time. The default components for a route are the url, method, preValidation, and handler.
```javascript module.exports = async (server, { hdbCore, logger }) => { diff --git a/docs/custom-functions/restarting-server.md b/docs/custom-functions/restarting-server.md index 4362efd5..fbabb514 100644 --- a/docs/custom-functions/restarting-server.md +++ b/docs/custom-functions/restarting-server.md @@ -1,6 +1,10 @@ +--- +title: Restarting the Server +--- + # Restarting the Server -One way to manage Custom Functions is through [Harper Studio](../harper-studio/README.md). It performs all the necessary operations automatically. To get started, navigate to your instance in Harper Studio and click the subnav link for “functions”. If you have not yet enabled Custom Functions, it will walk you through the process. Once configuration is complete, you can manage and deploy Custom Functions in minutes. +One way to manage Custom Functions is through [Harper Studio](../harper-studio/). It performs all the necessary operations automatically. To get started, navigate to your instance in Harper Studio and click the subnav link for “functions”. If you have not yet enabled Custom Functions, it will walk you through the process. Once configuration is complete, you can manage and deploy Custom Functions in minutes. For any changes made to your routes, helpers, or projects, you’ll need to restart the Custom Functions server to see them take effect. Harper Studio does this automatically whenever you create or delete a project, or add, edit, or delete a route or helper. If you need to restart the Custom Functions server yourself, you can use the following operation to do so: diff --git a/docs/custom-functions/templates.md b/docs/custom-functions/templates.md index 4cfbd85c..2c4122ca 100644 --- a/docs/custom-functions/templates.md +++ b/docs/custom-functions/templates.md @@ -1,3 +1,7 @@ +--- +title: Templates +--- + # Templates -Check out our always-expanding library of templates in our open-source [Harper-Add-Ons GitHub repo](https://github.com/HarperDB-Add-Ons). +Check out our always-expanding library of templates in our open-source [Harper-Add-Ons GitHub repo](https://github.com/HarperDB-Add-Ons). diff --git a/docs/deployments/_category_.json b/docs/deployments/_category_.json new file mode 100644 index 00000000..8fdd6e17 --- /dev/null +++ b/docs/deployments/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Deployments", + "position": 3, + "link": { + "type": "generated-index", + "title": "Deployments Documentation", + "description": "Installation and deployment guides for HarperDB", + "keywords": [ + "deployments" + ] + } +} \ No newline at end of file diff --git a/docs/deployments/configuration.md b/docs/deployments/configuration.md index c2d27f46..4b971694 100644 --- a/docs/deployments/configuration.md +++ b/docs/deployments/configuration.md @@ -1,6 +1,10 @@ +--- +title: Configuration File +--- + # Configuration File -Harper is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the Harper root directory (by default this is a directory named `hdb` located in the home directory of the current user). +Harper is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the Harper root directory (by default this is a directory named `hdb` located in the home directory of the current user). Some configuration will be populated by default in the config file on install, regardless of whether it is used.
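For orientation, here is a minimal sketch of what a `harperdb-config.yaml` might contain. The section names (`logging`, `clustering`, `operationsApi`) and port 9925 all appear elsewhere in this document, but the exact nesting shown here is an assumption; rely on the reference sections below.

```yaml
# illustrative sketch only; the reference sections below are authoritative
logging:
  level: error
clustering:
  enabled: false
operationsApi:
  network:
    port: 9925 # assumed nesting for the operations API port
```

Per the underscore convention described below, a nested element such as `logging.level` can also be addressed as `logging_level` when set through environment variables or command line arguments.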
@@ -12,7 +16,7 @@ The configuration elements in `harperdb-config.yaml` use camelcase, such as `ope To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. **HarperDB must be restarted for changes to take effect.** -Alternatively, all configuration values can also be modified using environment variables, command line arguments, or the operations API via the [`set_configuration` operation](../developers/operations-api/configuration.md#set-configuration). +Alternatively, all configuration values can also be modified using environment variables, command line arguments, or the operations API via the [`set_configuration` operation](../developers/operations-api/configuration#set-configuration). For nested configuration elements, use underscores to represent parent-child relationships. When accessed this way, elements are case-insensitive. @@ -86,7 +90,7 @@ An array of allowable domains with CORS `corsAccessControlAllowHeaders` - _Type_: string; _Default_: 'Accept, Content-Type, Authorization' -A string representation of a comma separated list of header keys for the [Access-Control-Allow-Headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers) header for OPTIONS requests. +A string representation of a comma separated list of header keys for the [Access-Control-Allow-Headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers) header for OPTIONS requests. `headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) @@ -229,10 +233,10 @@ The `replication` section configures [Harper replication](../developers/replicat ```yaml replication: hostname: server-one - url: wss://server-one:9925 + url: wss://server-one:9925 databases: '*' routes: - - wss://server-two:9925 + - wss://server-two:9925 port: null securePort: 9933 enableRootCAs: true @@ -274,7 +278,7 @@ replication: copyTablesToCatchUp: true hostname: server-one routes: - - wss://server-two:9925 # URL based route + - wss://server-two:9925 # URL based route - hostname: server-three # define a hostname and port port: 9930 startTime: 2024-02-06T15:30:00Z @@ -309,7 +313,7 @@ This defines the shard id of this instance and is used in conjunction with the [ The `clustering` section configures the NATS clustering engine, which is used to replicate data between instances of Harper. -_Note: There exist two ways to create clusters and replicate data in Harper. One option is to use native Harper replication over Websockets. The other option is to use_ [_NATS_](https://nats.io/about/) _to facilitate the cluster._ +_Note: There exist two ways to create clusters and replicate data in Harper. One option is to use native Harper replication over WebSockets. The other option is to use_ [_NATS_](https://nats.io/about/) _to facilitate the cluster._ Clustering offers a lot of different configurations; however, in a majority of cases the only options you will need to pay attention to are: @@ -547,7 +551,7 @@ clustering: ### `localStudio` -The `localStudio` section configures the local Harper Studio, a GUI for Harper hosted on the server. A hosted version of the Harper Studio with licensing and provisioning options is available at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or Harper Studio is made directly from your browser to the instance. +The `localStudio` section configures the local Harper Studio, a GUI for Harper hosted on the server.
A hosted version of the Harper Studio with licensing and provisioning options is available at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or Harper Studio is made directly from your browser to the instance. `enabled` - _Type_: boolean; _Default_: false @@ -708,8 +712,7 @@ Harper's logger supports defining multiple logging configurations for different `logging.external` -The `logging.external` section can be used to define logging for all external components that use the [`logger` API](../technical-details/reference/globals.md). For example: - +The `logging.external` section can be used to define logging for all external components that use the [`logger` API](../technical-details/reference/globals). For example: ```yaml logging: external: @@ -796,11 +799,11 @@ This will enable cookie-based sessions to maintain an authenticated session. Thi `operationTokenTimeout` - _Type_: string; _Default_: 1d -Defines the length of time an operation token will be valid until it expires. Example values: https://github.com/vercel/ms. +Defines the length of time an operation token will be valid until it expires. Example values: https://github.com/vercel/ms. `refreshTokenTimeout` - _Type_: string; _Default_: 1d -Defines the length of time a refresh token will be valid until it expires. Example values: https://github.com/vercel/ms. +Defines the length of time a refresh token will be valid until it expires. Example values: https://github.com/vercel/ms. ### `operationsApi` @@ -951,7 +954,7 @@ storage: `compactOnStart` - _Type_: boolean; _Default_: false -When `true` all non-system databases will be compacted when starting Harper, read more [here](../administration/compact.md). +When `true`, all non-system databases will be compacted when starting Harper; read more [here](../administration/compact). `compactOnStartKeepBackup` - _Type_: boolean; _Default_: false @@ -1225,9 +1228,9 @@ The name of the component. This will be used to name the folder where the compon `package` - _Type_: string -A reference to your [component](../technical-details/reference/components/applications.md#adding-components-to-root) package. This could be a remote git repo, a local folder/file or an NPM package. Harper will add this package to a package.json file and call `npm install` on it, so any reference that works with that paradigm will work here. +A reference to your [component](../technical-details/reference/components/applications#adding-components-to-root) package. This could be a remote git repo, a local folder/file, or an NPM package. Harper will add this package to a package.json file and call `npm install` on it, so any reference that works with that paradigm will work here. -Read more about npm install [here](https://docs.npmjs.com/cli/v8/commands/npm-install) +Read more about npm install [here](https://docs.npmjs.com/cli/v8/commands/npm-install). `port` - _Type_: number _Default_: whatever is set in `http.port` diff --git a/docs/deployments/harper-cli.md b/docs/deployments/harper-cli.md index eb707c74..a572bc60 100644 --- a/docs/deployments/harper-cli.md +++ b/docs/deployments/harper-cli.md @@ -1,3 +1,7 @@ +--- +title: Harper CLI +--- + # Harper CLI ## Harper CLI @@ -12,7 +16,7 @@ To install Harper with CLI prompts, run the following command: harperdb install ``` -Alternatively, Harper installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](configuration.md#Using-the-Configuration-File-and-Naming-Conventions).
Note, when used in conjunction, command line arguments will override environment variables. +Alternatively, Harper installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](configuration#using-the-configuration-file-and-naming-conventions). Note, when used in conjunction, command line arguments will override environment variables. **Environment Variables** @@ -153,7 +157,7 @@ last_updated_record: 1724483231970.9949 `harperdb set_configuration logging_level=error` -`harperdb deploy_component project=my-cool-app package=https://github.com/HarperDB/application-template` +`harperdb deploy_component project=my-cool-app package=https://github.com/HarperDB/application-template` `harperdb get_components` @@ -170,7 +174,7 @@ The CLI can also be used to run operations on remote Harper instances. To do thi ```bash export CLI_TARGET_USERNAME=HDB_ADMIN export CLI_TARGET_PASSWORD=password -harperdb describe_database database=dev target=https://server.com:9925 +harperdb describe_database database=dev target=https://server.com:9925 ``` The same set of operations API calls is available for remote operations as well. @@ -180,11 +184,11 @@ The same set of operations API are available for remote operations as well. When using remote operations, you can deploy a local component to the remote instance. If you omit the `package` parameter, you can deploy the current directory. This will package the current directory and send it to the target server (also `deploy` is allowed as an alias to `deploy_component`): ```bash -harperdb deploy target=https://server.com:9925 +harperdb deploy target=https://server.com:9925 ``` If you are interacting with a cluster, you may wish to include the `replicated=true` parameter to ensure that the deployment operation is replicated to all nodes in the cluster. You will also need to restart afterwards to apply the changes (here seen with the replicated parameter): ```bash -harperdb restart target=https://server.com:9925 replicated=true +harperdb restart target=https://server.com:9925 replicated=true ``` diff --git a/docs/deployments/harper-cloud/README.md b/docs/deployments/harper-cloud/README.md deleted file mode 100644 index dd077912..00000000 --- a/docs/deployments/harper-cloud/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Harper Cloud - -[Harper Cloud](https://studio.harperdb.io/) is the easiest way to test drive Harper, it’s Harper-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. Harper Cloud is currently powered by AWS with additional cloud providers on our roadmap for the future. - -You can create a new [Harper Cloud instance in the Harper Studio](../../administration/harper-studio/instances.md#create-a-new-instance). diff --git a/docs/deployments/harper-cloud/alarms.md b/docs/deployments/harper-cloud/alarms.md index d9c5c08c..8b695c37 100644 --- a/docs/deployments/harper-cloud/alarms.md +++ b/docs/deployments/harper-cloud/alarms.md @@ -1,6 +1,10 @@ +--- +title: Alarms +--- + # Alarms -Harper Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered organization owners will immediately receive an email alert and the alert will be available on the [Instance Configuration](../../administration/harper-studio/instance-configuration.md) page. The below table describes each alert and their evaluation metrics. +Harper Cloud instance alarms are triggered when certain conditions are met.
Once alarms are triggered, organization owners will immediately receive an email alert and the alert will be available on the [Instance Configuration](../../administration/harper-studio/instance-configuration) page. The table below describes each alert and its evaluation metrics. ### Heading Definitions @@ -11,6 +15,6 @@ Harper Cloud instance alarms are triggered when certain conditions are met. Once | Alarm | Threshold | Intervals | Proposed Remedy | | ------- | ---------- | --------- | ------------------------------------------------------------------------------------------------------------------------------ | -| Storage | > 90% Disk | 1 x 5min | [Increased storage volume](../../administration/harper-studio/instance-configuration.md#update-instance-storage) | -| CPU | > 90% Avg | 2 x 5min | [Increase instance size for additional CPUs](../../administration/harper-studio/instance-configuration.md#update-instance-ram) | -| Memory | > 90% RAM | 2 x 5min | [Increase instance size](../../administration/harper-studio/instance-configuration.md#update-instance-ram) | +| Storage | > 90% Disk | 1 x 5min | [Increased storage volume](../../administration/harper-studio/instance-configuration#update-instance-storage) | +| CPU | > 90% Avg | 2 x 5min | [Increase instance size for additional CPUs](../../administration/harper-studio/instance-configuration#update-instance-ram) | +| Memory | > 90% RAM | 2 x 5min | [Increase instance size](../../administration/harper-studio/instance-configuration#update-instance-ram) | diff --git a/docs/deployments/harper-cloud/index.md b/docs/deployments/harper-cloud/index.md new file mode 100644 index 00000000..fbf2d81e --- /dev/null +++ b/docs/deployments/harper-cloud/index.md @@ -0,0 +1,9 @@ +--- +title: Harper Cloud +--- + +# Harper Cloud + +[Harper Cloud](https://studio.harperdb.io/) is the easiest way to test drive Harper; it’s Harper-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. Harper Cloud is currently powered by AWS, with additional cloud providers on our roadmap for the future. + +You can create a new Harper Cloud instance in the Harper Studio. diff --git a/docs/deployments/harper-cloud/instance-size-hardware-specs.md b/docs/deployments/harper-cloud/instance-size-hardware-specs.md index e8e5e1da..72979d8d 100644 --- a/docs/deployments/harper-cloud/instance-size-hardware-specs.md +++ b/docs/deployments/harper-cloud/instance-size-hardware-specs.md @@ -1,3 +1,7 @@ +--- +title: Instance Size Hardware Specs +--- + # Instance Size Hardware Specs While Harper Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*. @@ -16,4 +20,4 @@ While Harper Cloud bills by RAM, each instance has other specifications associat | m5.16xlarge | 256 | 64 | 20 | Up to 3.1 GHz Intel Xeon Platinum 8000 | | m5.24xlarge | 384 | 96 | 25 | Up to 3.1 GHz Intel Xeon Platinum 8000 | -\*Specifications are subject to change. For the most up to date information, please refer to AWS documentation: [https://aws.amazon.com/ec2/instance-types/](https://aws.amazon.com/ec2/instance-types/). +\*Specifications are subject to change. For the most up-to-date information, please refer to AWS documentation: [https://aws.amazon.com/ec2/instance-types/](https://aws.amazon.com/ec2/instance-types/).
diff --git a/docs/deployments/harper-cloud/iops-impact.md b/docs/deployments/harper-cloud/iops-impact.md index b74edd33..7c2390df 100644 --- a/docs/deployments/harper-cloud/iops-impact.md +++ b/docs/deployments/harper-cloud/iops-impact.md @@ -1,3 +1,7 @@ +--- +title: IOPS Impact on Performance +--- + # IOPS Impact on Performance Harper, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of a server, virtual machine, or container running Harper. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that Harper performs as expected. @@ -14,7 +18,7 @@ Harper Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS; as a result, all Harper Cloud storage options will offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future. -You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html. +You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html. ## Estimating IOPS for Harper Instance diff --git a/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md b/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md index b3caa3df..0ca2e5a6 100644 --- a/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md +++ b/docs/deployments/harper-cloud/verizon-5g-wavelength-instances.md @@ -1,10 +1,14 @@ +--- +title: Verizon 5G Wavelength +--- + # Verizon 5G Wavelength These instances are only accessible from the Verizon network. When accessing your Harper instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, or Verizon mobile devices. Harper on Verizon 5G Wavelength brings Harper closer to the end user exclusively on the Verizon network, resulting in as little as single-digit millisecond response time from Harper to the client. -Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/). +Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/). Harper 5G Wavelength Instance Specs While Harper 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*. @@ -14,7 +18,7 @@ Harper 5G Wavelength Instance Specs While Harper 5G Wavelength bills by RAM, eac | t3.xlarge | 16 | 4 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor | | r5.2xlarge | 64 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum Processor | -\*Specifications are subject to change. For the most up to date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/). +\*Specifications are subject to change. For the most up-to-date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/). ## Harper 5G Wavelength Storage Harper 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SS AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS it can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time.
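As a rough worked example (the scaling figure here comes from AWS's published gp2 behavior rather than from this document, so verify it against the linked AWS docs): gp2 baseline performance scales at about 3 IOPS per GiB with a 100 IOPS minimum, so a 100 GiB volume can sustain roughly 300 IOPS indefinitely, while smaller volumes can burst to around 3,000 IOPS for limited periods.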
-Smaller gp2 volumes are perfect for trying out the functionality of Harper, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger Harper volume. Learn more about the [impact of IOPS on performance here](iops-impact.md). +Smaller gp2 volumes are perfect for trying out the functionality of Harper, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger Harper volume. Learn more about the [impact of IOPS on performance here](iops-impact). -You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#EBSVolumeTypes_gp2). +You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#EBSVolumeTypes_gp2). diff --git a/docs/deployments/install-harper/README.md b/docs/deployments/install-harper/README.md deleted file mode 100644 index 1305b182..00000000 --- a/docs/deployments/install-harper/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# Install Harper - -## Install Harper - -This documentation contains information for installing Harper locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with Harper Cloud](https://studio.harperdb.io/sign-up). Harper is a cross-platform database; we recommend Linux for production use, but Harper can run on Windows and Mac as well, for development purposes. Installation is usually very simple and just takes a few steps, but there are a few different options documented here. - -Harper runs on Node.js, so if you do not have it installed, you need to do that first (if you have installed, you can skip to installing Harper, itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally NVM can be installed with the following command: - -```bash -curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash -``` - -And then logout and login, and then install Node.js using nvm. We recommend using LTS, but support all currently maintained Node versions (which is currently version 14 and newer, and make sure to always uses latest minor/patch for the major version): - -```bash -nvm install --lts -``` - -#### Install and Start Harper - -Then you can install Harper with NPM and start it: - -```bash -npm install -g harperdb -harperdb -``` - -Harper will automatically start after installation. Harper's installation can be configured with numerous options via CLI arguments, for more information visit the [Harper Command Line Interface](../harper-cli.md) guide. - -If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system to use as a database server in our linux installation guide](linux.md). - -## With Docker - -If you would like to run Harper in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server.
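As an illustrative sketch of running the container: the `harperdb/harperdb` image and ports 9925/9926 come from this document, but the volume path and the `HDB_ADMIN_*` environment variable names are assumptions, so treat the Docker Hub page referenced below as the authoritative source.

```bash
# illustrative only; volume path and HDB_ADMIN_* names are assumptions,
# verify against the Docker Hub page before use
docker run -d \
  -v $(pwd)/hdb:/home/harperdb/hdb \
  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
  -e HDB_ADMIN_PASSWORD=password \
  -p 9925:9925 \
  -p 9926:9926 \
  harperdb/harperdb
```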
- -Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a Harper container. - -## Offline Install - -If you need to install Harper on a device that doesn't have an Internet connection, you can choose your version and download the npm package and install it directly (you’ll still need Node.js and NPM): - -[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) - -Once you’ve downloaded the .tgz file, run the following command from the directory where you’ve placed it: - -```bash -npm install -g harperdb-X.X.X.tgz harperdb install -``` - -## Installation on Less Common Platforms - -Harper comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including: - -- [Go](https://go.dev/dl/): version 1.19.1 -- GCC -- Make -- Python v3.7, v3.8, v3.9, or v3.10 diff --git a/docs/deployments/install-harper/index.md b/docs/deployments/install-harper/index.md new file mode 100644 index 00000000..25e775d7 --- /dev/null +++ b/docs/deployments/install-harper/index.md @@ -0,0 +1,61 @@ +--- +title: Install Harper +--- + +# Install Harper + +## Install Harper + +This documentation contains information for installing Harper locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with Harper Cloud](https://studio.harperdb.io/sign-up). Harper is a cross-platform database; we recommend Linux for production use, but Harper can run on Windows and Mac as well, for development purposes. Installation is usually very simple and just takes a few steps, but there are a few different options documented here. + +Harper runs on Node.js, so if you do not have it installed, you need to do that first (if you have it installed, you can skip to installing Harper itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally NVM can be installed with the following command: + +```bash +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash +``` + +Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but we support all currently maintained Node versions (currently version 14 and newer; make sure to always use the latest minor/patch release for the major version): + +```bash +nvm install --lts +``` + +#### Install and Start Harper + +Then you can install Harper with NPM and start it: + +```bash +npm install -g harperdb +harperdb +``` + +Harper will automatically start after installation. Harper's installation can be configured with numerous options via CLI arguments; for more information, visit the [Harper Command Line Interface](../harper-cli) guide. + +If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system for use as a database server in our linux installation guide](linux).
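Since the install can be driven entirely by CLI arguments, a non-interactive install might look like the sketch below. The flag names here (`TC_AGREEMENT`, `ROOTPATH`, admin credentials, operations API port) are assumptions based on the CLI and configuration conventions shown elsewhere in this document; verify them against the Harper CLI guide before use.

```bash
# hedged sketch of a non-interactive install; flag names are assumptions
npm install -g harperdb
harperdb install \
  --TC_AGREEMENT yes \
  --ROOTPATH /home/user/hdb \
  --OPERATIONSAPI_NETWORK_PORT 9925 \
  --HDB_ADMIN_USERNAME HDB_ADMIN \
  --HDB_ADMIN_PASSWORD password
```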
+ +## With Docker + +If you would like to run Harper in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server. + +Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a Harper container. + +## Offline Install + +If you need to install Harper on a device that doesn't have an Internet connection, you can choose your version and download the npm package and install it directly (you’ll still need Node.js and NPM): + +[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) + +Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it: + +```bash +npm install -g harperdb-X.X.X.tgz +harperdb install +``` + +## Installation on Less Common Platforms + +Harper comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including: + +- [Go](https://go.dev/dl/): version 1.19.1 +- GCC +- Make +- Python v3.7, v3.8, v3.9, or v3.10 diff --git a/docs/deployments/install-harper/linux.md b/docs/deployments/install-harper/linux.md index 365989aa..15da9a7b 100644 --- a/docs/deployments/install-harper/linux.md +++ b/docs/deployments/install-harper/linux.md @@ -1,3 +1,7 @@ +--- +title: On Linux +--- + # On Linux If you wish to install locally or already have a configured server, see the basic [Installation Guide](./) @@ -9,10 +13,10 @@ The following is a recommended way to configure Linux and install Harper. These These instructions assume that the following has already been completed: 1. Linux is installed -2. Basic networking is configured -3. A non-root user account dedicated to Harper with sudo privileges exists -4. An additional volume for storing Harper files is attached to the Linux instance -5. Traffic to ports 9925 (Harper Operations API) 9926 (Harper Application Interface) and 9932 (Harper Clustering) is permitted +1. Basic networking is configured +1. A non-root user account dedicated to Harper with sudo privileges exists +1. An additional volume for storing Harper files is attached to the Linux instance +1. Traffic to ports 9925 (Harper Operations API), 9926 (Harper Application Interface), and 9932 (Harper Clustering) is permitted While you will need to access Harper through port 9925 for the administration through the operations API, and port 9932 for clustering, for a higher level of security, you may want to consider keeping both of these ports restricted to a VPN or VPC, and only have the application interface (9926 by default) exposed to the public Internet. @@ -124,7 +128,7 @@ echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf Install Node Version Manager (nvm) ```bash -curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash ``` Load nvm (or logout and then login) @@ -133,13 +137,13 @@ .
~/.nvm/nvm.sh ``` -Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites)) +Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites)) ```bash nvm install ``` -### Install and Start Harper +### Install and Start Harper Here is an example of installing Harper with minimal configuration. @@ -170,7 +174,7 @@ harperdb start \ --CLUSTERING_NODENAME "hdb1" ``` -You can also use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your [custom configuration file](../configuration.md): +You can also use a custom configuration file to set values on install; use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your [custom configuration file](../configuration): ```bash npm install -g harperdb @@ -218,4 +222,4 @@ systemctl daemon-reload systemctl enable harperdb ``` -For more information visit the [Harper Command Line Interface guide](../harper-cli.md) and the [Harper Configuration File guide](../configuration.md). +For more information, visit the [Harper Command Line Interface guide](../harper-cli) and the [Harper Configuration File guide](../configuration). diff --git a/docs/deployments/upgrade-hdb-instance.md b/docs/deployments/upgrade-hdb-instance.md index cde1a11d..a277f707 100644 --- a/docs/deployments/upgrade-hdb-instance.md +++ b/docs/deployments/upgrade-hdb-instance.md @@ -1,3 +1,7 @@ +--- +title: Upgrade a Harper Instance +--- + # Upgrade a Harper Instance This document describes best practices for upgrading self-hosted Harper instances. Harper can be upgraded using a combination of npm and built-in Harper upgrade scripts. Whenever upgrading your Harper installation, it is recommended you make a backup of your data first. Note: This document applies to self-hosted Harper instances only. All [Harper Cloud instances](harper-cloud/) will be upgraded by the Harper Cloud team. @@ -6,19 +10,19 @@ This document describes best practices for upgrading self-hosted Harper instance Upgrading Harper is a two-step process. First, the latest version of Harper must be downloaded from npm; then the Harper upgrade scripts will be utilized to ensure the newest features are available on the system. -1. Install the latest version of Harper using `npm install -g harperdb`. +1. Install the latest version of Harper using `npm install -g harperdb`. Note `-g` should only be used if you installed Harper globally (which is recommended). -2. Run `harperdb` to initiate the upgrade process. +1. Run `harperdb` to initiate the upgrade process. Harper will then prompt you for all appropriate inputs and then run the upgrade directives. ## Node Version Manager (nvm) -[Node Version Manager (nvm)](http://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm can be found here: https://nvm.sh/. +[Node Version Manager (nvm)](http://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found here: https://nvm.sh/.
-Harper supports Node.js versions 14.0.0 and higher, however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command: +Harper supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command: ```bash nvm install <version> @@ -96,11 +100,11 @@ The core of this upgrade is the _bridge node_. This node will run both NATS and ### Enabling Plexus -To enable Plexus on a node that is already running NATS, you will need to update [two values](configuration.md) in the `harperdb-config.yaml` file: +To enable Plexus on a node that is already running NATS, you will need to update [two values](configuration) in the `harperdb-config.yaml` file: ```yaml replication: - url: wss://my-cluster-node-1:9925 + url: wss://my-cluster-node-1:9925 hostname: node-1 ``` @@ -115,22 +119,22 @@ replication: - On this node, follow the "Enabling Plexus" steps from the previous section, but **do not disable NATS clustering on this instance.** - Stop the instance and perform the upgrade. - Start the instance. This node should now be running both Plexus and NATS. -2. Upgrade a node: +1. Upgrade a node: - Choose a node that needs upgrading and enable Plexus by following the "Enable Plexus" steps. - Disable NATS by setting `clustering.enabled` to `false`. - Stop the instance and upgrade it. - Start the instance. - - Call [`add_node`](../developers/operations-api/clustering.md#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node. _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering.md#add-node)_._ + - Call [`add_node`](../developers/operations-api/clustering#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node. _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering#add-node)_._ ```json { "operation": "add_node", "hostname": "node-1", "url": "wss://my-cluster-node-1:9925" } ``` -3. Repeat Step 2 on all remaining nodes that need to be upgraded. -4. Disable NATS on the bridge node by setting `clustering.enabled` to `false` and restart the instance. +1. Repeat Step 2 on all remaining nodes that need to be upgraded. +1. Disable NATS on the bridge node by setting `clustering.enabled` to `false` and restart the instance. Your cluster upgrade should now be complete, with no NATS processes running on any of the nodes.
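As a final sanity check after the upgrade, one option is to query each node through the operations API. A minimal request body for the `cluster_status` operation is sketched below; consult the operations API reference for the exact response shape.

```json
{
	"operation": "cluster_status"
}
```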
diff --git a/docs/developers/_category_.json b/docs/developers/_category_.json new file mode 100644 index 00000000..9fe399bf --- /dev/null +++ b/docs/developers/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Developers", + "position": 1, + "link": { + "type": "generated-index", + "title": "Developers Documentation", + "description": "Comprehensive guides and references for building applications with HarperDB", + "keywords": [ + "developers" + ] + } +} \ No newline at end of file diff --git a/docs/developers/applications/README.md b/docs/developers/applications/README.md deleted file mode 100644 index 6279d220..00000000 --- a/docs/developers/applications/README.md +++ /dev/null @@ -1,233 +0,0 @@ -# Applications - -Harper is more than a database, it's a distributed clustering platform allowing you to package your schema, endpoints and application logic and deploy them to an entire fleet of Harper instances optimized for on-the-edge scalable data delivery. - -In this guide, we are going to explore the evermore extensible architecture that Harper provides by building a Harper application, a fundamental building-block of the Harper ecosystem. - -When working through this guide, we recommend you use the [Harper Application Template](https://github.com/HarperDB/application-template) repo as a reference. - -Before we get started, let's clarify some terminology that is used throughout the documentation. - -**Components** are the high-level concept for modules that extend the Harper core platform adding additional functionality. The application you will build here is a component. In addition to applications, components also encompass extensions. - -> We are actively working to disambiguate the terminology. When you see "component", such as in the Operations API or CLI, it generally refers to an application. We will do our best to clarify exactly which classification of a component whenever possible. - -**Applications** are best defined as the implementation of a specific user-facing feature or functionality. Applications are built on top of extensions and can be thought of as the end product that users interact with. For example, a Next.js application that serves a web interface or an Apollo GraphQL server that provides a GraphQL API are both applications. - -**Extensions** are the building blocks of the Harper component system. Applications depend on extensions to provide the functionality the application is implementing. For example, the built-in `graphqlSchema` extension enables applications to define their databases and tables using GraphQL schemas. Furthermore, the `@harperdb/nextjs` and `@harperdb/apollo` extensions are the building blocks that provide support for building Next.js and Apollo applications. - -All together, the support for implementing a feature is the extension, and the actual implementation of the feature is the application. - -Extensions can also depend on other extensions. For example, the [`@harperdb/apollo`](https://github.com/HarperDB/apollo) extension depends on the built-in `graphqlSchema` extension to create a cache table for Apollo queries. Applications can then use the `@harperdb/apollo` extension to implement an Apollo GraphQL backend server. 
- -```mermaid -flowchart TD - subgraph Applications - direction TB - NextJSApp["Next.js App"] - ApolloApp["Apollo App"] - CustomResource["Custom Resource"] - end - - subgraph Extensions - direction TB - subgraph Custom - NextjsExt["@harperdb/nextjs"] - ApolloExt["@harperdb/apollo"] - end - subgraph Built-In - GraphqlSchema["graphqlSchema"] - JsResource["jsResource"] - Rest["rest"] - end - end - - subgraph Core - direction TB - Database["database"] - FileSystem["file-system"] - Networking["networking"] - end - - NextJSApp --> NextjsExt - ApolloApp --> ApolloExt - CustomResource --> JsResource & GraphqlSchema & Rest - - NextjsExt --> Networking - NextjsExt --> FileSystem - ApolloExt --> GraphqlSchema - ApolloExt --> Networking - - GraphqlSchema --> Database - JsResource --> Database - Rest --> Networking -``` - -> As of Harper v4.6, a new, **experimental** component system has been introduced called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade. Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleApplication` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](../../technical-details/reference/components/plugins.md) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported. - -Beyond applications and extensions, components are further classified as built-in or custom. **Built-in** components are included with Harper by default and can be directly referenced by their name. The `graphqlSchema`, `rest`, and `jsResource` extensions used in the previous application example are all examples of built-in extensions. **Custom** components must use external references, generally npm or GitHub packages, and are often included as dependencies within the `package.json` of the component. - -> Harper maintains a number of custom components that are available on `npm` and `GitHub`, such as the [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs) extension or the [`@harperdb/status-check`](https://github.com/HarperDB/status-check) application. - -Harper does not currently include any built-in applications, making "custom applications" a bit redundant. Generally, we just say "application". However, there is a multitude of both built-in and custom extensions, and so the documentation refers to them as such. A complete list of built-in extensions is available in the [Built-In Extensions](../../technical-details/reference/components/built-in-extensions.md) documentation page, and the list of custom extensions and applications is available below. - -This guide is going to walk you through building a basic Harper application using a set of built-in extensions. - -> The Technical Details section of the documentation contains a [complete reference for all aspects of components](../../technical-details/reference/components), applications, extensions, and more. - -## Custom Functionality with JavaScript - -[The getting started guide](../../getting-started/first-harper-app.md) covers how to build an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. 
This gives you tremendous flexibility and control over how data is accessed and modified in Harper. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality. Let's add a property to the dog records when they are returned, that includes their age in human years. In Harper, data is accessed through our [Resource API](../../technical-details/reference/resources/README.md), a standard interface to access data sources, tables, and make them available to endpoints. Database tables are `Resource` classes, and so extending the function of a table is as simple as extending their class. - -To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder). And then endpoints can be defined with Resource classes that `export`ed. This can be done in addition to, or in lieu of the `@export`ed types in the schema.graphql. If you are exporting and extending a table you defined in the schema make sure you remove the `@export` from the schema so that don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put` to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it: - -```javascript -// resources.js: -const { Dog } = tables; // get the Dog table from the Harper provided set of tables (in the default database) - -export class DogWithHumanAge extends Dog { - static loadAsInstance = false; - async get(target) { - const record = await super.get(target); - return { - ...record, // include all properties from the record - humanAge: 15 + record.age * 5, // silly calculation of human age equivalent - }; - } -} -``` - -Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, now we have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute so that accessing instance properties like `age`, will get the value from the underlying record. The instance holds information about the primary key of the record so updates and actions can be applied to the correct record. And changing or assigning new properties can be saved or included in the resource as it returned and serialized. The `return super.get(query)` call at the end allows for any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest.md#select-properties)). - -Often we may want to incorporate data from other tables or data sources in your data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql): - -```graphql -type Breed @table { - name: String @primaryKey - description: String @indexed - lifespan: Int - averageWeight: Float -} -``` - -We use the new table's (static) `get()` method to retrieve a breed by id. 
Harper will maintain the current context, ensuring that we are accessing the data atomically, in a consistent snapshot across tables. This provides: - -1. Automatic tracking of most recently updated timestamps across resources for caching purposes -2. Sharing of contextual metadata (like user who requested the data) -3. Transactional atomicity for any writes (not needed in this get operation, but important for other operations) - -The resource methods are automatically wrapped with a transaction and will automatically commit the changes when the method finishes. This allows us to fully utilize multiple resources in our current transaction. With our own snapshot of the database for the Dog and Breed table we can then access data like this: - -```javascript -//resource.js: -const { Dog, Breed } = tables; // get the Breed table too -export class DogWithBreed extends Dog { - static loadAsInstance = false; - async get(target) { - // get the Dog record - const record = await super.get(target); - // get the Breed record - let breedDescription = await Breed.get(record.breed); - return { - ...record, - breedDescription, - }; - } -} -``` - -The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance. - -Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While HTTP PUT method has a specific semantic definition (replace current record), a common method for custom actions is through the HTTP POST method. the POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's post() method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array to a specific instance. We might do it like this, and specify an action to be able to differentiate actions: - -```javascript -export class CustomDog extends Dog { - static loadAsInstance = false; - async post(target, data) { - if (data.action === 'add-trick') { - const record = this.update(target); - record.tricks.push(data.trick); - } - } -} -``` - -And a POST request to /CustomDog/ would call this `post` method. The Resource class then automatically tracks changes you make to your resource instances and saves those changes when this transaction is committed (again these methods are automatically wrapped in a transaction and committed once the request handler is finished). So when you push data on to the `tricks` array, this will be recorded and persisted when this method finishes and before sending a response to the client. - -The `post` method automatically marks the current instance as being update. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction). - -We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post()` method or `put()` method to do this. 
For example, we might do this: - -```javascript -export class CustomDog extends Dog { - static loadAsInstance = false; - async post(target, data) { - if (data.action === 'add-trick') { - const context = this.getContext(); - // if we want to skip the default permission checks, we can turn off checkPermissions: - target.checkPermissions = false; - const record = this.update(target); - // and do our own/custom permission check: - if (record.owner !== context.user?.username) { - throw new Error('Can not update this record'); - } - record.tricks.push(data.trick); - } - } -} -``` - -Any methods that are not defined will fall back to Harper's default authorization procedure based on users' roles. If you are using/extending a table, this is based on Harper's [role based access](../security/users-and-roles.md). If you are extending the base `Resource` class, the default access requires super user permission. - -You can also use the `default` export to define the root path resource handler. For example: - -```javascript -// resources.json -export default class CustomDog extends Dog { - ... -``` - -This will allow requests to url like / to be directly resolved to this resource. - -## Define Custom Data Sources - -We can also directly implement the Resource class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like: - -```javascript -const { Breed } = tables; // our Breed table -class BreedSource extends Resource { - // define a data source - async get(target) { - return (await fetch(`http://best-dog-site.com/${target}`)).json(); - } -} -// define that our breed table is a cache of data from the data source above, with a specified expiration -Breed.sourcedFrom(BreedSource, { expiration: 3600 }); -``` - -The [caching documentation](caching.md) provides much more information on how to use Harper's powerful caching capabilities and set up data sources. - -Harper provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals.md) and the [Resource interface](../../technical-details/reference/resources/README.md). - -## Configuring Applications/Components - -For complete information of configuring applications, refer to the [Component Configuration](../../technical-details/reference/components/configuration.md) reference page. - -## Define Fastify Routes - -Exporting resource will generate full RESTful endpoints. But, you may prefer to define endpoints through a framework. Harper includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins, that provides sophisticated route definition capabilities. - -By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which will allow these modules to export a function to define fastify routes. See the [defining routes documentation](define-routes.md) for more information on how to create Fastify routes. 
- 
-However, Fastify is not as fast as Harper's RESTful endpoints (about 10%-20% slower/more-overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so generally the Harper's REST interface is recommended for optimum performance and ease of use.
-
-## Restarting Your Instance
-
-Generally, Harper will auto-detect when files change and auto-restart the appropriate threads. However, if there are changes that aren't detected, you may manually restart, with the `restart_service` operation:
-
-```json
-{
-	"operation": "restart_service",
-	"service": "http_workers"
-}
-```
diff --git a/docs/developers/applications/caching.md b/docs/developers/applications/caching.md
index 2c733583..2a56b70b 100644
--- a/docs/developers/applications/caching.md
+++ b/docs/developers/applications/caching.md
@@ -1,10 +1,14 @@
+---
+title: Caching
+---
+
 # Caching

 Harper has integrated support for caching data from external sources. With built-in caching capabilities and distributed high-performance low-latency responsiveness, Harper makes an ideal data caching server. Harper can store cached data in standard tables, as queryable structured data, so data can easily be consumed in one format (for example JSON or CSV) and provided to end users in different formats with different selected properties (for example MessagePack, with a subset of selected properties), or even with customized querying capabilities. Harper also manages and provides timestamps/tags for proper caching control, facilitating further downstream caching. With these combined capabilities, Harper is an extremely fast, interoperable, flexible, and customizable caching server.

 ## Configuring Caching

-To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](defining-schemas.md)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`:
+To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](defining-schemas)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`:

 ```graphql
 type MyCache @table(expiration: 3600) @export {
@@ -29,7 +33,7 @@ Next, you need to define the source for your cache. External data sources could

 ```javascript
 class ThirdPartyAPI extends Resource {
 	async get() {
-		return (await fetch(`http://some-api.com/${this.getId()}`)).json();
+		return (await fetch(`http://some-api.com/${this.getId()}`)).json();
 	}
 }
 ```
@@ -66,7 +70,7 @@ In the example above, we simply retrieved data to fulfill a cache request. 
We ma
```javascript
 class ThirdPartyAPI extends Resource {
 	async get() {
-		let response = await fetch(`http://some-api.com/${this.getId()}`);
+		let response = await fetch(`http://some-api.com/${this.getId()}`);
 		this.getContext().lastModified = response.headers.get('Last-Modified');
 		return response.json();
 	}
}
@@ -82,14 +86,14 @@ class ThirdPartyAPI extends Resource {
 	async get() {
 		const context = this.getContext();
 		let headers = new Headers();
-		if (context.replacingVersion) // this is the existing cached record
+		if (context.replacingVersion) // this is the existing cached record
 			headers.set('If-Modified-Since', new Date(context.replacingVersion).toUTCString());
-		let response = await fetch(`http://some-api.com/${this.getId()}`, { headers });
+		let response = await fetch(`http://some-api.com/${this.getId()}`, { headers });
 		let cacheInfo = response.headers.get('Cache-Control');
 		let maxAge = cacheInfo?.match(/max-age=(\d+)/)?.[1];
-		if (maxAge) // we can set a specific expiration time by setting context.expiresAt
-			context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time
-		// we can just revalidate and return the record if the origin has confirmed that it has the same version:
+		if (maxAge) // we can set a specific expiration time by setting context.expiresAt
+			context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time
+		// we can just revalidate and return the record if the origin has confirmed that it has the same version:
 		if (response.status === 304) return context.replacingRecord;
 	...
```
@@ -107,7 +111,7 @@ const { MyTable } = tables;
 export class MyTableEndpoint extends MyTable {
 	async post(data) {
 		if (data.invalidate)
-			// use this flag as a marker
+			// use this flag as a marker
 			this.invalidate();
 	}
 }
@@ -122,16 +126,16 @@ We can provide more control of an active cache with subscriptions. If there is a
 ```javascript
 class ThirdPartyAPI extends Resource {
 	async *subscribe() {
-		setInterval(() => { // every second retrieve more data
-			// get the next data change event from the source
-			let update = (await fetch(`http://some-api.com/latest-update`)).json();
-			const event = { // define the change event (which will update the cache)
-				type: 'put', // this would indicate that the event includes the new data value
-				id: // the primary key of the record that updated
-				value: // the new value of the record that updated
-				timestamp: // the timestamp of when the data change occurred
-			};
-			yield event; // this returns this event, notifying the cache of the change
-		}, 1000);
+		while (true) { // poll every second for more data (a setInterval callback cannot yield from this generator)
+			// get the next data change event from the source
+			let update = await (await fetch(`http://some-api.com/latest-update`)).json();
+			const event = { // define the change event (which will update the cache)
+				type: 'put', // this would indicate that the event includes the new data value
+				id: update.id, // the primary key of the record that updated (assuming the source payload carries these fields)
+				value: update.value, // the new value of the record that updated
+				timestamp: update.timestamp, // the timestamp of when the data change occurred
+			};
+			yield event; // this returns this event, notifying the cache of the change
+			await new Promise((resolve) => setTimeout(resolve, 1000)); // wait a second before polling again
+		}
 	}
 	async get() {
@@ -143,7 +147,7 @@ Notification events should always include an `id` property to indicate the prima
- `put` - This indicates that the record has been updated and provides the new value of the record.
- `invalidate` - Alternatively, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
- `delete` - This indicates that the record has been deleted.
-- `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time.md).
+- `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time).
- `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property.

And the following properties can be defined on event objects:
@@ -162,7 +166,7 @@ By default, Harper will only run the subscribe method on one thread. Harper is m
 ```javascript
 class ThirdPartyAPI extends Resource {
 	static subscribeOnThisThread(threadIndex) {
-		return threadIndex < 2; // run on two threads (the first two threads)
+		return threadIndex < 2; // run on two threads (the first two threads)
 	}
 	async *subscribe() {
 		....
@@ -184,7 +188,7 @@ class ThirdPartyAPI extends Resource {

 ## Downstream Caching

-It is highly recommended that you utilize the [REST interface](../rest.md) for accessing caching tables, as it facilitates downstreaming caching for clients. Timestamps are recorded with all cached entries. Timestamps are then used for incoming [REST requests to specify the `ETag` in the response](../rest.md#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data based on the timestamp/`ETag` of the entries that are cached in Harper. Caching tables also have [subscription capabilities](caching.md#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of Harper, both as passive or active caches.
+It is highly recommended that you utilize the [REST interface](../rest) for accessing caching tables, as it facilitates downstream caching for clients. Timestamps are recorded with all cached entries. Timestamps are then used for incoming [REST requests to specify the `ETag` in the response](../rest#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data based on the timestamp/`ETag` of the entries that are cached in Harper. Caching tables also have [subscription capabilities](caching#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of Harper, as either passive or active caches.
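To make this revalidation flow concrete, here is a minimal sketch of a downstream client; the endpoint name (`/MyCache/`) and local port are illustrative, not prescribed by Harper:

```javascript
// Hypothetical downstream client: reuse a locally cached copy until the ETag changes.
async function getWithRevalidation(id, cached) {
	const headers = cached ? { 'If-None-Match': cached.etag } : {};
	const response = await fetch(`http://localhost:9926/MyCache/${id}`, { headers });
	if (response.status === 304) return cached; // unchanged on the server; keep the local copy
	return { etag: response.headers.get('ETag'), value: await response.json() };
}
```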
## Write-Through Caching
@@ -193,13 +197,13 @@ The cache we have defined so far only has data flowing from the data source to t
 ```javascript
 class ThirdPartyAPI extends Resource {
 	async put(data) {
-		await fetch(`http://some-api.com/${this.getId()}`, {
+		await fetch(`http://some-api.com/${this.getId()}`, {
 			method: 'PUT',
 			body: JSON.stringify(data)
 		});
 	}
 	async delete() {
-		await fetch(`http://some-api.com/${this.getId()}`, {
+		await fetch(`http://some-api.com/${this.getId()}`, {
 			method: 'DELETE',
 		});
 	}
@@ -215,9 +219,9 @@ When you are using a caching table, it is important to remember that any resourc
 ```javascript
 class MyCache extends tables.MyCache {
 	async post(data) {
-		// if the data is not cached locally, retrieves from source:
+		// if the data is not cached locally, retrieves from source:
 		await this.ensuredLoaded();
-		// now we can be sure that the data is loaded, and can access properties
+		// now we can be sure that the data is loaded, and can access properties
 		this.quantity = this.quantity - data.purchases;
 	}
 }
@@ -235,9 +239,9 @@ With our passive update examples, we have provided a data source handler with a
 const { Post, Comment } = tables;
 class BlogSource extends Resource {
 	async get() {
-		const post = await (await fetch(`http://my-blog-server/${this.getId()}`).json());
+		const post = await (await fetch(`http://my-blog-server/${this.getId()}`)).json();
 		for (let comment of post.comments) {
-			await Comment.put(comment, this); // save this comment as part of our current context and transaction
+			await Comment.put(comment, this); // save this comment as part of our current context and transaction
 		}
 		return post;
 	}
diff --git a/docs/developers/applications/data-loader.md b/docs/developers/applications/data-loader.md
index 9773843a..488b7b19 100644
--- a/docs/developers/applications/data-loader.md
+++ b/docs/developers/applications/data-loader.md
@@ -1,3 +1,7 @@
+---
+title: Data Loader
+---
+
 # Data Loader

 The Data Loader is a built-in component that provides a reliable mechanism for loading data from JSON or YAML files into Harper tables as part of component deployment. This feature is particularly useful for ensuring specific records exist in your database when deploying components, such as seed data, configuration records, or initial application data.
@@ -11,7 +15,7 @@ dataLoader:
  files: 'data/*.json'
```
-The Data Loader is an [Extension](../components/reference.md#extensions) and supports the standard `files` configuration option.
+The Data Loader is an [Extension](../../technical-details/reference/components#extensions) and supports the standard `files` configuration option.

 ## Data File Format

@@ -104,30 +108,30 @@ When Harper starts up with a component that includes the Data Loader:

 1. The Data Loader reads all specified data files (JSON or YAML)
-2. For each file, it validates that a single table is specified
-3. Records are inserted or updated based on timestamp comparison:
+1. For each file, it validates that a single table is specified
+1. Records are inserted or updated based on timestamp comparison:
   - New records are inserted if they don't exist
   - Existing records are updated only if the data file's modification time is newer than the record's updated time
   - This ensures data files can be safely reloaded without overwriting newer changes
-4. If records with the same primary key already exist, updates occur only when the file is newer
+1. 
If records with the same primary key already exist, updates occur only when the file is newer

-Note: While the Data Loader can create tables automatically by inferring the schema from the provided records, it's recommended to define your table schemas explicitly using the [graphqlSchema](../applications/defining-schemas.md) component for better control and type safety.
+Note: While the Data Loader can create tables automatically by inferring the schema from the provided records, it's recommended to define your table schemas explicitly using the [graphqlSchema](../applications/defining-schemas) component for better control and type safety.

 ## Best Practices

-1. **Define Schemas First**: While the Data Loader can infer schemas, it's strongly recommended to define your table schemas and relations explicitly using the [graphqlSchema](../applications/defining-schemas.md) component before loading data. This ensures proper data types, constraints, and relationships between tables.
+1. **Define Schemas First**: While the Data Loader can infer schemas, it's strongly recommended to define your table schemas and relations explicitly using the [graphqlSchema](../applications/defining-schemas) component before loading data. This ensures proper data types, constraints, and relationships between tables.

-2. **One Table Per File**: Remember that each data file can only load records into a single table. Organize your files accordingly.
+1. **One Table Per File**: Remember that each data file can only load records into a single table. Organize your files accordingly.

-3. **Idempotency**: Design your data files to be idempotent - they should be safe to load multiple times without creating duplicate or conflicting data.
+1. **Idempotency**: Design your data files to be idempotent - they should be safe to load multiple times without creating duplicate or conflicting data (see the example data file after this list).

-4. **Version Control**: Include your data files in version control to ensure consistency across deployments.
+1. **Version Control**: Include your data files in version control to ensure consistency across deployments.

-5. **Environment-Specific Data**: Consider using different data files for different environments (development, staging, production).
+1. **Environment-Specific Data**: Consider using different data files for different environments (development, staging, production).

-6. **Data Validation**: Ensure your data files are valid JSON or YAML and match your table schemas before deployment.
+1. **Data Validation**: Ensure your data files are valid JSON or YAML and match your table schemas before deployment.

-7. **Sensitive Data**: Avoid including sensitive data like passwords or API keys directly in data files. Use environment variables or secure configuration management instead.
+1. **Sensitive Data**: Avoid including sensitive data like passwords or API keys directly in data files. Use environment variables or secure configuration management instead.
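As a concrete illustration of these practices, here is a minimal sketch of an idempotent seed file. The `database`/`table`/`records` layout follows the Data File Format section above; the table and field names are hypothetical:

```yaml
# data/dog-seed.yaml — hypothetical seed data; safe to reload because each
# record carries a fixed primary key, so reloads update rather than duplicate
database: data
table: Dog
records:
  - id: 1
    name: Harper
    breed: Labrador
  - id: 2
    name: Penny
    breed: Beagle
```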
## Example Component Structure @@ -167,6 +171,6 @@ rest: true ## Related Documentation -- [Built-In Components](../../technical-details/reference/components/built-in-extensions.md) -- [Extensions](../../technical-details/reference/components/extensions.md) -- [Bulk Operations](../operations-api/bulk-operations.md) - For loading data via the Operations API \ No newline at end of file +- [Built-In Components](../../technical-details/reference/components/built-in-extensions) +- [Extensions](../../technical-details/reference/components/extensions) +- [Bulk Operations](../operations-api/bulk-operations) - For loading data via the Operations API \ No newline at end of file diff --git a/docs/developers/applications/debugging.md b/docs/developers/applications/debugging.md index d407e0a3..bd9d2622 100644 --- a/docs/developers/applications/debugging.md +++ b/docs/developers/applications/debugging.md @@ -1,3 +1,7 @@ +--- +title: Debugging Applications +--- + # Debugging Applications Harper components and applications run inside the Harper process, which is a standard Node.js process that can be debugged with standard JavaScript development tools like Chrome's devtools, VSCode, and WebStorm. Debugging can be performed by launching the Harper entry script with your IDE, or you can start Harper in dev mode and connect your debugger to the running process (defaults to standard 9229 port): @@ -10,7 +14,7 @@ harperdb dev /path/to/app Once you have connected a debugger, you may set breakpoints in your application and fully debug it. Note that when using the `dev` command from the CLI, this will run Harper in single-threaded mode. This would not be appropriate for production use, but makes it easier to debug applications. -For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use Harper's logging facilities, so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by Harper. This logger can be used to output messages directly to the Harper log using standardized logging level functions, described below. The log level can be set in the [Harper Configuration File](../../deployments/configuration.md). +For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use Harper's logging facilities, so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by Harper. This logger can be used to output messages directly to the Harper log using standardized logging level functions, described below. The log level can be set in the [Harper Configuration File](../../deployments/configuration). Harper Logger Functions @@ -32,4 +36,4 @@ If you want to ensure a message is logged, you can use `notify` as these message ## Viewing the Log -The Harper Log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the [Studio Status page](../../administration/harper-studio/instance-metrics.md). Additionally, you can use the [`read_log` operation](../operations-api/logs.md) to query the Harper log. +The Harper Log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the Studio Status page. 
Additionally, you can use the [`read_log` operation](../operations-api/logs) to query the Harper log.
diff --git a/docs/developers/applications/define-routes.md b/docs/developers/applications/define-routes.md
index 454755b6..3438f300 100644
--- a/docs/developers/applications/define-routes.md
+++ b/docs/developers/applications/define-routes.md
@@ -1,13 +1,17 @@
+---
+title: Define Fastify Routes
+---
+
 # Define Fastify Routes

-Harper’s applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints. While we generally recommend building your endpoints/APIs with Harper's [REST interface](../rest.md) for better performance and standards compliance, Fastify's route can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.
+Harper’s applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints. While we generally recommend building your endpoints/APIs with Harper's [REST interface](../rest) for better performance and standards compliance, Fastify routes can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.

-The fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):
+The Fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):

 ```yaml
 fastifyRoutes: # This loads files that define fastify routes using fastify's auto-loader
   files: routes/*.js # specify the location of route definition modules
-  path: . # relative to the app-name, like http://server/app-name/route-name
+  path: . # relative to the app-name, like http://server/app-name/route-name
 ```

 By default, route URLs are configured to be:
@@ -16,7 +20,7 @@ By default, route URLs are configured to be:

 However, you can specify the path to be `/` if you wish to have your routes handling the root path of incoming URLs.

-- The route below, using the default config, within the **dogs** project, with a route of **breeds** would be available at **http://localhost:9926/dogs/breeds**.
+- The route below, using the default config, within the **dogs** project, with a route of **breeds** would be available at **http://localhost:9926/dogs/breeds**.

 In effect, this route is just a pass-through to Harper. The same result could have been achieved by hitting the core Harper API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.
@@ -81,7 +85,7 @@ export default async (server, { hdbCore, logger }) => {
 };
 ```

-Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](define-routes.md#helper-methods).
+Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](define-routes#helper-methods).
## Helper Methods
diff --git a/docs/developers/applications/defining-roles.md b/docs/developers/applications/defining-roles.md
index 075c02eb..55dd5885 100644
--- a/docs/developers/applications/defining-roles.md
+++ b/docs/developers/applications/defining-roles.md
@@ -1,4 +1,8 @@
-In addition to [defining a database schema](./defining-schemas.md), you can also define roles in your application. Roles are a way to group permissions together and assign them to users as part of Harper's [role based access control](../security/users-and-roles.md). An application component may declare roles that should exist for the application in a roles configuration file. To use this, first specify your roles config file in the `config.yaml` in your application directory:
+---
+title: Defining Roles
+---
+
+In addition to [defining a database schema](./defining-schemas), you can also define roles in your application. Roles are a way to group permissions together and assign them to users as part of Harper's [role based access control](../security/users-and-roles). An application component may declare roles that should exist for the application in a roles configuration file. To use this, first specify your roles config file in the `config.yaml` in your application directory:

 ```yaml
 roles:
diff --git a/docs/developers/applications/defining-schemas.md b/docs/developers/applications/defining-schemas.md
index 1c9d35b8..5337603b 100644
--- a/docs/developers/applications/defining-schemas.md
+++ b/docs/developers/applications/defining-schemas.md
@@ -1,8 +1,12 @@
+---
+title: Defining Schemas
+---
+
 # Defining Schemas

 Schemas define tables and their attributes. Schemas can be declaratively defined in Harper using GraphQL schema definitions. Schema definitions can be used to ensure that tables exist (that are required for applications), and have the appropriate attributes. Schemas can define the primary key, data types for attributes, if they are required, and specify which attributes should be indexed. The [introduction to applications](./) provides a helpful introduction to how to use schemas as part of database application development.

-Schemas can be used to define the expected structure of data, but are also highly flexible and support heterogeneous data structures and by default allows data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).
+Schemas can be used to define the expected structure of data, but are also highly flexible, support heterogeneous data structures, and by default allow data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).

 An example schema that defines a couple tables might look like:
@@ -39,7 +43,7 @@ By default the table name is inherited from the type name (in this case the tabl

- `@table(table: "table_name")` - This allows you to explicitly specify the table name.
- `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database.
- `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds.
-- `@table(audit: true)` - This enables the audit log for the table so that a history of record changes are recorded.
This defaults to [configuration file's setting for `auditLog`](../../deployments/configuration.md#logging).
+- `@table(audit: true)` - This enables the audit log for the table so that a history of record changes is recorded. This defaults to the [configuration file's setting for `auditLog`](../../deployments/configuration#logging).

 Database naming: the default "data" database is generally a good default choice for tables in applications that will not be reused in other applications (and don't need to worry about staying in a separate namespace). Applications with many tables may wish to organize the tables into separate databases (but remember that transactions do not preserve atomicity across different databases, only across tables in the same database). For components that are designed for re-use, it is recommended that you use a database name that is specific to the component (e.g. "my-component-data") to avoid name collisions with other components.
@@ -74,7 +78,7 @@ type Brand @table @export {
 }
 ```

-Once this is defined we can use the `brand` attribute as a [property in our product instances](../../technical-details/reference/resources/README.md) and allow for querying by `brand` and selecting brand attributes as returned properties in [query results](../rest.md).
+Once this is defined we can use the `brand` attribute as a [property in our product instances](../../technical-details/reference/resources/) and allow for querying by `brand` and selecting brand attributes as returned properties in [query results](../rest).

 Again, the foreign key may be a multi-valued array (array of keys referencing the target table records). For example, if we had a list of features that references a Feature table:
@@ -159,7 +163,7 @@ type Product @table {
 }
 ```

-For more in-depth information on computed properties, visit our blog [here](https://www.harpersystems.dev/development/tutorials/how-to-create-custom-indexes-with-computed-properties)
+For more in-depth information on computed properties, visit our blog [here](https://www.harpersystems.dev/development/tutorials/how-to-create-custom-indexes-with-computed-properties)

 ### Field Directives
@@ -191,7 +195,7 @@ HNSW indexing finds the nearest neighbors to a search vector. To use this, you c

 ```javascript
 let results = Product.search({
 	sort: { attribute: 'textEmbeddings', target: searchVector },
-	limit: 5, // get the five nearest neighbors
+	limit: 5, // get the five nearest neighbors
 });
 ```

@@ -201,7 +205,7 @@ This can be used in combination with other conditions as well, for example:

 let results = Product.search({
 	conditions: [{ attribute: 'price', comparator: 'lt', value: 50 }],
 	sort: { attribute: 'textEmbeddings', target: searchVector },
-	limit: 5, // get the five nearest neighbors
+	limit: 5, // get the five nearest neighbors
 });
 ```

@@ -237,7 +241,7 @@ The `@sealed` directive specifies that no additional properties should be allowe

 ### Defined vs Dynamic Schemas

-If you do not define a schema for a table and create a table through the operations API (without specifying attributes) or studio, such a table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema.md). It is generally best-practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.
+If you do not define a schema for a table and create a table through the operations API (without specifying attributes) or studio, such a table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema). It is generally best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.

 ### Field Types

@@ -246,14 +250,14 @@ Harper supports the following field types in addition to user defined (object) t

- `String`: String/text
- `Int`: A 32-bit signed integer (from -2147483648 to 2147483647)
- `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992)
-- `Float`: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). Note that all numbers are stored in the most compact representation available)
+- `Float`: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). Note that all numbers are stored in the most compact representation available)
- `BigInt`: Any integer (negative or positive) with less than 300 digits (Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately)
- `Boolean`: true or false
- `ID`: A string (but indicates it is not intended to be human readable)
- `Any`: Any primitive, object, or array is allowed
- `Date`: A Date object
- `Bytes`: Binary data as a Buffer or Uint8Array
-- `Blob`: Binary data as a [Blob](../../technical-details/reference/blob.md), designed for large blocks of data that can be streamed. It is recommend that you use this for binary data that will typically be larger than 20KB.
+- `Blob`: Binary data as a [Blob](../../technical-details/reference/blob), designed for large blocks of data that can be streamed. It is recommended that you use this for binary data that will typically be larger than 20KB.

 #### Renaming Tables

@@ -261,7 +265,7 @@ It is important to note that Harper does not currently support renaming tables.

 ### OpenAPI Specification

-_The_ [_OpenAPI Specification_](https://spec.openapis.org/oas/v3.1.0) _defines a standard, programming language-agnostic interface description for HTTP APIs, which allows both humans and computers to discover and understand the capabilities of a service without requiring access to source code, additional documentation, or inspection of network traffic._
+_The_ [_OpenAPI Specification_](https://spec.openapis.org/oas/v3.1.0) _defines a standard, programming language-agnostic interface description for HTTP APIs, which allows both humans and computers to discover and understand the capabilities of a service without requiring access to source code, additional documentation, or inspection of network traffic._

 If a set of endpoints is configured through a Harper GraphQL schema, those endpoints can be described by using a default REST endpoint called `GET /openapi`.
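As a minimal sketch of retrieving that description, assuming a local instance with its REST interface on the 9926 port used in the examples elsewhere in these docs:

```javascript
// Fetch the generated OpenAPI description from a local instance.
const spec = await (await fetch('http://localhost:9926/openapi')).json();
console.log(Object.keys(spec.paths)); // one path entry per exported endpoint
```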
diff --git a/docs/developers/applications/example-projects.md b/docs/developers/applications/example-projects.md
index 7e99e459..51231c31 100644
--- a/docs/developers/applications/example-projects.md
+++ b/docs/developers/applications/example-projects.md
@@ -1,33 +1,37 @@
+---
+title: Example Projects
+---
+
 # Example Projects

 **Library of example Harper applications and components:**

-- [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang
+- [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang

-- [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams
+- [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams

-- [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos
+- [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos

-- [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock
+- [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock

-- [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush
+- [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush

-- [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden
+- [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden

-- [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft
+- [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft

-- [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber
+- [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber

-- [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty
+- [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty

-- [How I used Harper Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj
+- [How I used Harper Custom Functions to Build a 
Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj

-- [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary
+- [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary

-- [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi
+- [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi

-- [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp
+- [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp

-- [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David
+- [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David

-- [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select\* Podcast
+- [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select\* Podcast
diff --git a/docs/developers/applications/index.md b/docs/developers/applications/index.md
new file mode 100644
index 00000000..2549639f
--- /dev/null
+++ b/docs/developers/applications/index.md
@@ -0,0 +1,237 @@
+---
+title: Applications
+---
+
+# Applications
+
+Harper is more than a database; it's a distributed clustering platform allowing you to package your schema, endpoints and application logic and deploy them to an entire fleet of Harper instances optimized for on-the-edge scalable data delivery.
+
+In this guide, we are going to explore the ever more extensible architecture that Harper provides by building a Harper application, a fundamental building block of the Harper ecosystem.
+
+When working through this guide, we recommend you use the [Harper Application Template](https://github.com/HarperDB/application-template) repo as a reference.
+
+Before we get started, let's clarify some terminology that is used throughout the documentation.
+
+**Components** are the high-level concept for modules that extend the Harper core platform, adding additional functionality. The application you will build here is a component. In addition to applications, components also encompass extensions.
+
+> We are actively working to disambiguate the terminology. When you see "component", such as in the Operations API or CLI, it generally refers to an application. We will do our best to clarify exactly which classification of component we mean whenever possible.
+
+**Applications** are best defined as the implementation of a specific user-facing feature or functionality. Applications are built on top of extensions and can be thought of as the end product that users interact with. For example, a Next.js application that serves a web interface or an Apollo GraphQL server that provides a GraphQL API are both applications.
+
+**Extensions** are the building blocks of the Harper component system.
Applications depend on extensions to provide the functionality the application is implementing. For example, the built-in `graphqlSchema` extension enables applications to define their databases and tables using GraphQL schemas. Furthermore, the `@harperdb/nextjs` and `@harperdb/apollo` extensions are the building blocks that provide support for building Next.js and Apollo applications.
+
+Altogether, the support for implementing a feature is the extension, and the actual implementation of the feature is the application.
+
+Extensions can also depend on other extensions. For example, the [`@harperdb/apollo`](https://github.com/HarperDB/apollo) extension depends on the built-in `graphqlSchema` extension to create a cache table for Apollo queries. Applications can then use the `@harperdb/apollo` extension to implement an Apollo GraphQL backend server.
+
+```mermaid
+flowchart TD
+    subgraph Applications
+        direction TB
+        NextJSApp["Next.js App"]
+        ApolloApp["Apollo App"]
+        CustomResource["Custom Resource"]
+    end
+
+    subgraph Extensions
+        direction TB
+        subgraph Custom
+            NextjsExt["@harperdb/nextjs"]
+            ApolloExt["@harperdb/apollo"]
+        end
+        subgraph Built-In
+            GraphqlSchema["graphqlSchema"]
+            JsResource["jsResource"]
+            Rest["rest"]
+        end
+    end
+
+    subgraph Core
+        direction TB
+        Database["database"]
+        FileSystem["file-system"]
+        Networking["networking"]
+    end
+
+    NextJSApp --> NextjsExt
+    ApolloApp --> ApolloExt
+    CustomResource --> JsResource & GraphqlSchema & Rest
+
+    NextjsExt --> Networking
+    NextjsExt --> FileSystem
+    ApolloExt --> GraphqlSchema
+    ApolloExt --> Networking
+
+    GraphqlSchema --> Database
+    JsResource --> Database
+    Rest --> Networking
+```
+
+> As of Harper v4.6, a new, **experimental** component system has been introduced called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade. Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleApplication` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](../../technical-details/reference/components/plugins) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported.
+
+Beyond applications and extensions, components are further classified as built-in or custom. **Built-in** components are included with Harper by default and can be directly referenced by their name. The `graphqlSchema`, `rest`, and `jsResource` extensions used in the previous application example are all examples of built-in extensions. **Custom** components must use external references, generally npm or GitHub packages, and are often included as dependencies within the `package.json` of the component.
+
+> Harper maintains a number of custom components that are available on `npm` and `GitHub`, such as the [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs) extension or the [`@harperdb/status-check`](https://github.com/HarperDB/status-check) application.
+
+Harper does not currently include any built-in applications, making "custom applications" a bit redundant. Generally, we just say "application". However, there is a multitude of both built-in and custom extensions, and so the documentation refers to them as such.
A complete list of built-in extensions is available in the [Built-In Extensions](../../technical-details/reference/components/built-in-extensions) documentation page, and the list of custom extensions and applications is available below.
+
+This guide is going to walk you through building a basic Harper application using a set of built-in extensions.
+
+> The Technical Details section of the documentation contains a [complete reference for all aspects of components](../../technical-details/reference/components), applications, extensions, and more.
+
+## Custom Functionality with JavaScript
+
+[The getting started guide](../../getting-started/first-harper-app) covers how to build an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. This gives you tremendous flexibility and control over how data is accessed and modified in Harper. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality. Let's add a property to the dog records when they are returned that includes their age in human years. In Harper, data is accessed through our [Resource API](../../technical-details/reference/resources/), a standard interface for accessing data sources and tables and making them available to endpoints. Database tables are `Resource` classes, and so extending the function of a table is as simple as extending their class.
+
+To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder). Endpoints can then be defined with Resource classes that are `export`ed. This can be done in addition to, or in lieu of, the `@export`ed types in the schema.graphql. If you are exporting and extending a table you defined in the schema, make sure you remove the `@export` from the schema so that you don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put`, to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it:
+
+```javascript
+// resources.js:
+const { Dog } = tables; // get the Dog table from the Harper provided set of tables (in the default database)
+
+export class DogWithHumanAge extends Dog {
+	static loadAsInstance = false;
+	async get(target) {
+		const record = await super.get(target);
+		return {
+			...record, // include all properties from the record
+			humanAge: 15 + record.age * 5, // silly calculation of human age equivalent
+		};
+	}
+}
+```
+
+Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, now we have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute so that accessing instance properties like `age` will get the value from the underlying record. The instance holds information about the primary key of the record so updates and actions can be applied to the correct record.
And changing or assigning new properties can be saved or included in the resource as it is returned and serialized. The `super.get(target)` call allows any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest#select-properties)).
+
+Often we may want to incorporate data from other tables or data sources in our data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql):
+
+```graphql
+type Breed @table {
+	name: String @primaryKey
+	description: String @indexed
+	lifespan: Int
+	averageWeight: Float
+}
+```
+
+We use the new table's (static) `get()` method to retrieve a breed by id. Harper will maintain the current context, ensuring that we are accessing the data atomically, in a consistent snapshot across tables. This provides:
+
+1. Automatic tracking of most recently updated timestamps across resources for caching purposes
+1. Sharing of contextual metadata (like the user who requested the data)
+1. Transactional atomicity for any writes (not needed in this get operation, but important for other operations)
+
+The resource methods are automatically wrapped with a transaction and will automatically commit the changes when the method finishes. This allows us to fully utilize multiple resources in our current transaction. With our own snapshot of the database for the Dog and Breed tables we can then access data like this:
+
+```javascript
+// resources.js:
+const { Dog, Breed } = tables; // get the Breed table too
+export class DogWithBreed extends Dog {
+	static loadAsInstance = false;
+	async get(target) {
+		// get the Dog record
+		const record = await super.get(target);
+		// get the Breed record
+		let breedDescription = await Breed.get(record.breed);
+		return {
+			...record,
+			breedDescription,
+		};
+	}
+}
+```
+
+The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified by the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance.
+
+Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While the HTTP PUT method has a specific semantic definition (replace the current record), a common method for custom actions is the HTTP POST method. The POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's `post()` method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array of a specific instance. We might do it like this, and specify an action to be able to differentiate actions:
+
+```javascript
+export class CustomDog extends Dog {
+	static loadAsInstance = false;
+	async post(target, data) {
+		if (data.action === 'add-trick') {
+			const record = this.update(target);
+			record.tricks.push(data.trick);
+		}
+	}
+}
+```
+
+And a POST request to /CustomDog/ would call this `post` method. The Resource class then automatically tracks changes you make to your resource instances and saves those changes when this transaction is committed (again these methods are automatically wrapped in a transaction and committed once the request handler is finished).
So when you push data onto the `tricks` array, this will be recorded and persisted when this method finishes and before sending a response to the client.
+
+The `post` method automatically marks the current instance as being updated. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like the `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction).
+
+We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post()` method or `put()` method to do this. For example, we might do this:
+
+```javascript
+export class CustomDog extends Dog {
+	static loadAsInstance = false;
+	async post(target, data) {
+		if (data.action === 'add-trick') {
+			const context = this.getContext();
+			// if we want to skip the default permission checks, we can turn off checkPermissions:
+			target.checkPermissions = false;
+			const record = this.update(target);
+			// and do our own/custom permission check:
+			if (record.owner !== context.user?.username) {
+				throw new Error('Cannot update this record');
+			}
+			record.tricks.push(data.trick);
+		}
+	}
+}
+```
+
+Any methods that are not defined will fall back to Harper's default authorization procedure based on users' roles. If you are using/extending a table, this is based on Harper's [role based access](../security/users-and-roles). If you are extending the base `Resource` class, the default access requires super user permission.
+
+You can also use the `default` export to define the root path resource handler. For example:
+
+```javascript
+// resources.js
+export default class CustomDog extends Dog {
+	...
+```
+
+This will allow requests to a URL like `/` to be directly resolved to this resource.
+
+## Define Custom Data Sources
+
+We can also directly implement the Resource class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like:
+
+```javascript
+const { Breed } = tables; // our Breed table
+class BreedSource extends Resource {
+	// define a data source
+	async get(target) {
+		return (await fetch(`http://best-dog-site.com/${target}`)).json();
+	}
+}
+// define that our breed table is a cache of data from the data source above, with a specified expiration
+Breed.sourcedFrom(BreedSource, { expiration: 3600 });
+```
+
+The [caching documentation](caching) provides much more information on how to use Harper's powerful caching capabilities and set up data sources.
+
+Harper provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals) and the [Resource interface](../../technical-details/reference/resources/).
+
+## Configuring Applications/Components
+
+For complete information on configuring applications, refer to the [Component Configuration](../../technical-details/reference/components/configuration) reference page.
+
+## Define Fastify Routes
+
+Exporting resources will generate full RESTful endpoints.
But you may prefer to define endpoints through a framework. Harper includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins that provides sophisticated route definition capabilities. + +By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which will allow these modules to export a function to define Fastify routes. See the [defining routes documentation](define-routes) for more information on how to create Fastify routes. + +However, Fastify is not as fast as Harper's RESTful endpoints (about 10%-20% slower/more overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so generally Harper's REST interface is recommended for optimum performance and ease of use. + +## Restarting Your Instance + +Generally, Harper will auto-detect when files change and auto-restart the appropriate threads. However, if there are changes that aren't detected, you may manually restart with the `restart_service` operation: + +```json +{ + "operation": "restart_service", + "service": "http_workers" +} +``` diff --git a/docs/developers/applications/web-applications.md b/docs/developers/applications/web-applications.md index f6d7798b..c49596b3 100644 --- a/docs/developers/applications/web-applications.md +++ b/docs/developers/applications/web-applications.md @@ -1,3 +1,7 @@ +--- +title: Web Applications on Harper +--- + # Web Applications on Harper Harper is an efficient, capable, and robust platform for developing web applications, with numerous capabilities designed @@ -18,11 +22,11 @@ using popular frameworks is a simple and straightforward process. Get started today with one of our examples: -- [Next.js](https://github.com/HarperDB/nextjs-example) -- [React SSR](https://github.com/HarperDB/react-ssr-example) -- [Vue SSR](https://github.com/HarperDB/vue-ssr-example) -- [Svelte SSR](https://github.com/HarperDB/svelte-ssr-example) -- [Solid SSR](https://github.com/HarperDB/solid-ssr-example) +- [Next.js](https://github.com/HarperDB/nextjs-example) +- [React SSR](https://github.com/HarperDB/react-ssr-example) +- [Vue SSR](https://github.com/HarperDB/vue-ssr-example) +- [Svelte SSR](https://github.com/HarperDB/svelte-ssr-example) +- [Solid SSR](https://github.com/HarperDB/solid-ssr-example) ## Cookie Support @@ -45,15 +49,15 @@ This allows web applications to directly interact with Harper and database resou ## Browser Caching Negotiation -Browsers support caching negotiation with revalidation, which allows requests for locally cached data to be sent to servers with a tag or timestamp. Harper REST functionality can fully interact with these headers, and return `304 Not Modified` response based on prior `Etag` sent in headers. It is highly recommended that you utilize the [REST interface](../rest.md) for accessing tables, as it facilitates this downstream browser caching. Timestamps are recorded with all records and are then returned [as the `ETag` in the response](../rest.md#cachingconditional-requests). Utilizing this browser caching can greatly reduce the load on your server and improve the performance of your web application by being able to instantly use locally cached data after revalidation from the server. +Browsers support caching negotiation with revalidation, which allows requests for locally cached data to be sent to servers with a tag or timestamp. 
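+For example, a revalidation round trip might look like the following sketch (the resource path and tag value are hypothetical): + +```http +GET /Dog/341 +If-None-Match: "1703883748213" + +HTTP/1.1 304 Not Modified +```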
Harper REST functionality can fully interact with these headers, and return a `304 Not Modified` response based on a prior `ETag` sent in request headers. It is highly recommended that you utilize the [REST interface](../rest) for accessing tables, as it facilitates this downstream browser caching. Timestamps are recorded with all records and are then returned [as the `ETag` in the response](../rest#cachingconditional-requests). Utilizing this browser caching can greatly reduce the load on your server and improve the performance of your web application by being able to instantly use locally cached data after revalidation from the server. ## Built-in Cross-Origin Resource Sharing (CORS) -Harper includes built-in support for Cross-Origin Resource Sharing (CORS), which allows you to define which domains are allowed to access your Harper instance. This is a critical security feature for web applications, as it prevents unauthorized access to your data from other domains, while allowing cross-domain access from known hosts. You can define the allowed domains in your [Harper configuration file](../../deployments/configuration.md#http), and Harper will automatically handle the CORS headers for you. +Harper includes built-in support for Cross-Origin Resource Sharing (CORS), which allows you to define which domains are allowed to access your Harper instance. This is a critical security feature for web applications, as it prevents unauthorized access to your data from other domains, while allowing cross-domain access from known hosts. You can define the allowed domains in your [Harper configuration file](../../deployments/configuration#http), and Harper will automatically handle the CORS headers for you. ## More Resources Make sure to check out our developer videos too: -- [Next.js on Harper | Step-by-Step Guide for Next Level Next.js Performance](https://youtu.be/GqLEwteFJYY) -- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc) +- [Next.js on Harper | Step-by-Step Guide for Next Level Next.js Performance](https://youtu.be/GqLEwteFJYY) +- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc) diff --git a/docs/developers/clustering/README.md b/docs/developers/clustering/README.md deleted file mode 100644 index a92ded99..00000000 --- a/docs/developers/clustering/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# NATS Clustering - -Harper 4.0 - 4.3 used a clustering system based on NATS for replication. In 4.4+, Harper has moved to a new native replication system that has better performance, reliability, and data consistency. This document describes the legacy NATS clustering system. Harper clustering is the process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns. - -Harper’s clustering engine replicates data between instances of Harper using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted, once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last writer wins model based on recorded transaction time on the transaction and the timestamp on the record on the node. 
- ---- - -### Common Use Case - -A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold: - -- The edge application should not be making outbound http requests for security purposes. -- There may not be a reliable network connection. -- Not all sensor data will be sent to the cloud--either because of the unreliable network connection, or maybe it’s just a pain to store it. -- The edge node should be inaccessible from outside the firewall. -- The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings. - -Harper simplifies the architecture of such an application with its bi-directional, table-level replication: - -- The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds. -- The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so. -- When a threshold violation occurs, the application adds a record to the “alerts” table. -- The application appends to that record array “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation. -- The edge instance publishes the “alerts” table up to the cloud instance. - -By letting Harper focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to Harper, you reduce the overall exposure of your application to outside forces. diff --git a/docs/developers/clustering/certificate-management.md b/docs/developers/clustering/certificate-management.md index 00b7bb67..a11a1a35 100644 --- a/docs/developers/clustering/certificate-management.md +++ b/docs/developers/clustering/certificate-management.md @@ -1,8 +1,12 @@ +--- +title: Certificate Management +--- + # Certificate Management ## Development -Out of the box Harper generates certificates that are used when Harper nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the Harper node, the following settings (see the full [configuration file](../../deployments/configuration.md) docs for more details) are defaulted & recommended for ease of development: +Out of the box Harper generates certificates that are used when Harper nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the Harper node, the following settings (see the full [configuration file](../../deployments/configuration) docs for more details) are defaulted & recommended for ease of development: ``` clustering: @@ -64,13 +68,13 @@ If you are having TLS issues with clustering, use the following steps to verify openssl x509 -in .pem -noout -text` ``` -2. Make sure the certificate validates with the CA: +1. Make sure the certificate validates with the CA: ``` openssl verify -CAfile .pem .pem` ``` -3. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match: +1. 
Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match: ``` openssl rsa -modulus -noout -in .pem | openssl md5 diff --git a/docs/developers/clustering/creating-a-cluster-user.md index 864989a5..0a8b2a6c 100644 --- a/docs/developers/clustering/creating-a-cluster-user.md +++ b/docs/developers/clustering/creating-a-cluster-user.md @@ -1,3 +1,7 @@ +--- +title: Creating a Cluster User +--- + # Creating a Cluster User Inter-node authentication takes place via Harper users. There is a special role type called `cluster_user` that exists by default and limits the user to only clustering functionality. @@ -40,7 +44,7 @@ clustering: _Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._ -2. Upon installation using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file. +1. Upon installation using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file. _Note: Using command line or environment variables for setting the cluster user only works on install._ ``` harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123! ``` -3. Upon installation using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file. +1. Upon installation using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file. ``` CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123 diff --git a/docs/developers/clustering/enabling-clustering.md index 762a9902..606bc29c 100644 --- a/docs/developers/clustering/enabling-clustering.md +++ b/docs/developers/clustering/enabling-clustering.md @@ -1,3 +1,7 @@ +--- +title: Enabling Clustering +--- + # Enabling Clustering Clustering does not run by default; it needs to be enabled. @@ -26,13 +30,13 @@ _Note: When making any changes to the `harperdb-config.yaml` file Harper must be _Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._ -2. Using **command line variables**. +1. Using **command line variables**. ``` harperdb --CLUSTERING_ENABLED true ``` -3. Using **environment variables**. +1. Using **environment variables**. ``` CLUSTERING_ENABLED=true diff --git a/docs/developers/clustering/establishing-routes.md index 8fe628a7..a3c27556 100644 --- a/docs/developers/clustering/establishing-routes.md +++ b/docs/developers/clustering/establishing-routes.md @@ -1,3 +1,7 @@ +--- +title: Establishing Routes +--- + # Establishing Routes A route is a connection between two nodes. It is how the clustering network is established. @@ -28,14 +32,14 @@ clustering: port: 9932 ``` -![figure 1](../../../images/clustering/figure1.png) +![figure 1](/clustering/figure1.png) This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes. There are multiple ways to set routes; they are: 1. Directly editing the `harperdb-config.yaml` file (refer to code snippet above). -2. 
Calling `cluster_set_routes` through the API. +1. Calling `cluster_set_routes` through the API. ```json { @@ -47,13 +51,13 @@ There are multiple ways to set routes, they are: _Note: When making any changes to Harper configuration Harper must be restarted for the changes to take effect._ -3. From the command line. +1. From the command line. ```bash --CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]" ``` -4. Using environment variables. +1. Using environment variables. ```bash CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}] diff --git a/docs/developers/clustering/index.md b/docs/developers/clustering/index.md new file mode 100644 index 00000000..95c3433c --- /dev/null +++ b/docs/developers/clustering/index.md @@ -0,0 +1,31 @@ +--- +title: NATS Clustering +--- + +# NATS Clustering + +Harper 4.0 - 4.3 used a clustering system based on NATS for replication. In 4.4+, Harper has moved to a new native replication system that has better performance, reliability, and data consistency. This document describes the legacy NATS clustering system. Harper clustering is the process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns. + +Harper’s clustering engine replicates data between instances of Harper using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last writer wins model based on the recorded transaction time on the transaction and the timestamp on the record on the node. + +--- + +### Common Use Case + +A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold: + +- The edge application should not be making outbound http requests for security purposes. +- There may not be a reliable network connection. +- Not all sensor data will be sent to the cloud--either because of the unreliable network connection, or maybe it’s just a pain to store it. +- The edge node should be inaccessible from outside the firewall. +- The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings. + +Harper simplifies the architecture of such an application with its bi-directional, table-level replication: + +- The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds. +- The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so. +- When a threshold violation occurs, the application adds a record to the “alerts” table. +- The application appends to that record the “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation. +- The edge instance publishes the “alerts” table up to the cloud instance. + +By letting Harper focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. 
And by restricting your app to only making local calls to Harper, you reduce the overall exposure of your application to outside forces. diff --git a/docs/developers/clustering/managing-subscriptions.md index 5d94fb75..f043c9d1 100644 --- a/docs/developers/clustering/managing-subscriptions.md +++ b/docs/developers/clustering/managing-subscriptions.md @@ -1,3 +1,7 @@ +--- +title: Managing subscriptions +--- + Tables are replicated when the table is designated as replicating and there is a subscription between the nodes. Tables are designated as replicating by default, but this can be changed by setting `replicate` to `false` in the table definition: diff --git a/docs/developers/clustering/naming-a-node.md index 32054115..7a512efb 100644 --- a/docs/developers/clustering/naming-a-node.md +++ b/docs/developers/clustering/naming-a-node.md @@ -1,3 +1,7 @@ +--- +title: Naming a Node +--- + # Naming a Node Node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster. @@ -19,7 +23,7 @@ clustering: _Note: When making any changes to the `harperdb-config.yaml` file Harper must be restarted for the changes to take effect._ -2. Calling `set_configuration` through the operations API +1. Calling `set_configuration` through the operations API ```json { @@ -28,13 +32,13 @@ _Note: When making any changes to the `harperdb-config.yaml` file Harper must be } ``` -3. Using command line variables. +1. Using command line variables. ``` harperdb --CLUSTERING_NODENAME Node1 ``` -4. Using environment variables. +1. Using environment variables. ``` CLUSTERING_NODENAME=Node1 diff --git a/docs/developers/clustering/requirements-and-definitions.md index 53559a78..22bc3977 100644 --- a/docs/developers/clustering/requirements-and-definitions.md +++ b/docs/developers/clustering/requirements-and-definitions.md @@ -1,3 +1,7 @@ +--- +title: Requirements and Definitions +--- + # Requirements and Definitions To create a cluster you must have two or more nodes\* (aka instances) of Harper running. diff --git a/docs/developers/clustering/subscription-overview.md index 484aa7d1..b812f8bf 100644 --- a/docs/developers/clustering/subscription-overview.md +++ b/docs/developers/clustering/subscription-overview.md @@ -1,3 +1,7 @@ +--- +title: Subscription Overview +--- + # Subscription Overview A subscription defines how data should move between two nodes. They are exclusively table level and operate independently. They connect a table on one node to a table on another node; the subscription will apply to a matching database name and table name on both nodes. @@ -16,7 +20,7 @@ A subscription consists of: #### Publish subscription -![figure 2](../../../images/clustering/figure2.png) +![figure 2](/clustering/figure2.png) This diagram is an example of a `publish` subscription from the perspective of Node1. @@ -24,7 +28,7 @@ The record with id 2 has been inserted in the dog table on Node1, after it has c #### Subscribe subscription -![figure 3](../../../images/clustering/figure3.png) +![figure 3](/clustering/figure3.png) This diagram is an example of a `subscribe` subscription from the perspective of Node1. 
@@ -32,10 +36,10 @@ The record with id 3 has been inserted in the dog table on Node2, after it has c #### Subscribe and Publish -![figure 4](../../../images/clustering/figure4.png) +![figure 4](/clustering/figure4.png) This diagram shows both subscribe and publish but publish is set to false. You can see that because subscribe is true the insert on Node2 is being replicated on Node1 but because publish is set to false the insert on Node1 is _**not**_ being replicated on Node2. -![figure 5](../../../images/clustering/figure5.png) +![figure 5](/clustering/figure5.png) This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1. diff --git a/docs/developers/clustering/things-worth-knowing.md index 1b63c8ac..bdff086f 100644 --- a/docs/developers/clustering/things-worth-knowing.md +++ b/docs/developers/clustering/things-worth-knowing.md @@ -1,3 +1,7 @@ +--- +title: Things Worth Knowing +--- + # Things Worth Knowing Additional information that will help you define your clustering topology. @@ -36,4 +40,4 @@ Harper has built-in resiliency for when network connectivity is lost within a su Harper clustering creates a mesh network between nodes, giving end users the ability to create an infinite number of topologies. Subscription topologies can be as simple or as complex as needed. -![](../../../images/clustering/figure6.png) +![](/clustering/figure6.png) diff --git a/docs/developers/miscellaneous/README.md b/docs/developers/miscellaneous/README.md deleted file mode 100644 index 9772780f..00000000 --- a/docs/developers/miscellaneous/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Miscellaneous - -This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc. diff --git a/docs/developers/miscellaneous/google-data-studio.md b/docs/developers/miscellaneous/google-data-studio.md index f0c683f5..b29af70e 100644 --- a/docs/developers/miscellaneous/google-data-studio.md +++ b/docs/developers/miscellaneous/google-data-studio.md @@ -1,8 +1,12 @@ +--- +title: Google Data Studio +--- + # Google Data Studio -[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The Harper Google Data Studio connector seamlessly integrates your Harper data with Google Data Studio so you can build custom, real-time data visualizations. +[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The Harper Google Data Studio connector seamlessly integrates your Harper data with Google Data Studio so you can build custom, real-time data visualizations. -The Harper Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/). +The Harper Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/). ## Requirements @@ -10,23 +14,23 @@ The Harper database must be accessible through the Internet in order for Google ## Get Started -Get started by selecting the Harper connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create). 
+Get started by selecting the Harper connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create). -1. Log in to https://datastudio.google.com/. -2. Add a new Data Source using the Harper connector. The current release version can be added as a data source by following this link: [Harper Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60_VJeO3BhHJPvdd20-S76g). -3. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database). -4. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it. -5. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer. -6. Check the box for “Allow Bad Certs” if your Harper instance does not have a valid SSL certificate. [Harper Cloud](../../deployments/harper-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using [Harper Cloud](../../deployments/harper-cloud/) or another instance you know should always have valid SSL certificates, do not check this box. -7. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button. +1. Log in to https://datastudio.google.com/. +1. Add a new Data Source using the Harper connector. The current release version can be added as a data source by following this link: [Harper Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60_VJeO3BhHJPvdd20-S76g). +1. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database). +1. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it. +1. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer. +1. Check the box for “Allow Bad Certs” if your Harper instance does not have a valid SSL certificate. [Harper Cloud](../../deployments/harper-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using [Harper Cloud](../../deployments/harper-cloud/) or another instance you know should always have valid SSL certificates, do not check this box. +1. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button. - Table will ask you for a Schema and a Table to return all fields of using `SELECT *`. - SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together, and use Harper specific SQL functions, along with the usual power SQL grants. -8. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. 
You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue. -9. If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as creating new fields from the report view that do calculations on other fields. +1. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue. +1. If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as creating new fields from the report view that do calculations on other fields. ## Considerations -- Both Postman and the [Harper Studio](../../administration/harper-studio/README.md) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector's user. +- Both Postman and the [Harper Studio](../../administration/harper-studio/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector's user. - You may sign out of your current user by going to the instances tab in Harper Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab. - It’s highly recommended that you create a read-only user role in Harper Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it. - The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set. diff --git a/docs/developers/miscellaneous/index.md b/docs/developers/miscellaneous/index.md new file mode 100644 index 00000000..f80dc499 --- /dev/null +++ b/docs/developers/miscellaneous/index.md @@ -0,0 +1,7 @@ +--- +title: Miscellaneous +--- + +# Miscellaneous + +This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc. diff --git a/docs/developers/miscellaneous/query-optimization.md b/docs/developers/miscellaneous/query-optimization.md index 16faa3cb..139b862b 100644 --- a/docs/developers/miscellaneous/query-optimization.md +++ b/docs/developers/miscellaneous/query-optimization.md @@ -1,3 +1,7 @@ +--- +title: Query Optimization +--- + ## Query Optimization Harper has powerful query functionality with excellent performance characteristics. However, like any database, different queries can vary significantly in performance. It is important to understand how querying works to help you optimize your queries for the best performance. 
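As a quick illustration (a hypothetical sketch, assuming a `Dog` table exposed through the REST interface with `breed` declared as an `@indexed` attribute), querying on an indexed attribute lets Harper serve the request from the index rather than scanning records: ```http GET /Dog/?breed=Labrador Accept: application/json ``` Indexing the attributes you filter on is usually the first optimization to consider.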
diff --git a/docs/developers/miscellaneous/sdks.md index 0202cb57..d64e19ce 100644 --- a/docs/developers/miscellaneous/sdks.md +++ b/docs/developers/miscellaneous/sdks.md @@ -1,4 +1,5 @@ --- +title: SDKs description: >- Software Development Kits available for connecting to Harper from different languages. @@ -8,14 +9,14 @@ description: >- | SDK/Tool | Description | Installation | | ------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- | -| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A Dot Net Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | -| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A Javascript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | -| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use Harper as the data source for a Gatsby project at the build time | `npm i -s gatsby-source-harperdb` | -| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The Harper EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` | -| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of Harper API functions with wrappers for an object-oriented interface | `pip3 install harperdb` | -| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A Harper SDK for Flutter | `flutter pub add harperdb` | -| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | -| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to Harper using the Node-Red platform | `npm i -s node-red-contrib-harperdb` | -| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A Harper SDK for NodeJS | `npm i -s harperive` | -| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A Harper SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` | -| [HarperDB Go SDK](https://github.com/HarperDB/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB/sdk-go` | +| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A Dot Net Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | +| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A Javascript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | +| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use Harper as the data source for a Gatsby project at the build time | `npm i -s gatsby-source-harperdb` | +| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The Harper EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` | +| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of Harper API functions with wrappers for an object-oriented interface | `pip3 install harperdb` | +| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A Harper SDK for Flutter | `flutter 
pub add harperdb` | +| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | +| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to Harper using the Node-Red platform | `npm i -s node-red-contrib-harperdb` | +| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A Harper SDK for NodeJS | `npm i -s harperive` | +| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A Harper SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` | +| [HarperDB Go SDK](https://github.com/HarperDB/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB/sdk-go` | diff --git a/docs/developers/operations-api/README.md b/docs/developers/operations-api/README.md deleted file mode 100644 index eff9b595..00000000 --- a/docs/developers/operations-api/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# Operations API - -The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling Harper. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../deployments/configuration.md#operationsapi), on the root path, where the body is the operations object. These requests need to authenticated, which can be done with [basic auth](../security/basic-auth.md) or [JWT authentication](../security/jwt-auth.md). For example, a request to create a table would be performed as: - -```http -POST http://my-harperdb-server:9925/ -Authorization: Basic YourBase64EncodedInstanceUser:Pass -Content-Type: application/json - -{ - "operation": "create_table", - "table": "my-table" -} -``` - -The operations API reference is available below and categorized by topic: - -* [Quick Start Examples](quickstart-examples.md) -* [Databases and Tables](databases-and-tables.md) -* [NoSQL Operations](nosql-operations.md) -* [Bulk Operations](bulk-operations.md) -* [Users and Roles](users-and-roles.md) -* [Clustering](clustering.md) -* [Clustering with NATS](clustering-nats.md) -* [Components](components.md) -* [Registration](registration.md) -* [Jobs](jobs.md) -* [Logs](logs.md) -* [System Operations](system-operations.md) -* [Configuration](configuration.md) -* [Certificate Management](certificate-management.md) -* [Token Authentication](token-authentication.md) -* [SQL Operations](sql-operations.md) -* [Advanced JSON SQL Examples](advanced-json-sql-examples.md) -* [Analytics](analytics.md) - -• [Past Release API Documentation](https://olddocs.harperdb.io) - -## More Examples - -Here is an example of using `curl` to make an operations API request: - -```bash -curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \ ---header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \ ---header 'Content-Type: application/json' \ ---data-raw '{ -"operation": "create_schema", -"schema": "dev" -}' -``` diff --git a/docs/developers/operations-api/advanced-json-sql-examples.md b/docs/developers/operations-api/advanced-json-sql-examples.md index 2fee2a0c..58116884 100644 --- a/docs/developers/operations-api/advanced-json-sql-examples.md +++ b/docs/developers/operations-api/advanced-json-sql-examples.md @@ -1,3 +1,7 @@ +--- +title: Advanced JSON SQL Examples +--- + # Advanced JSON SQL Examples ## Create movies database @@ -86,7 +90,7 @@ Inserts data from a hosted CSV file into the "movie" table using the 'csv_url_lo "operation": "csv_url_load", "database": "movies", "table": "movie", - "csv_url": 
"https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv" + "csv_url": "https:/search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv" } ``` @@ -111,7 +115,7 @@ Inserts data from a hosted CSV file into the "credits" table using the 'csv_url_ "operation": "csv_url_load", "database": "movies", "table": "credits", - "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv" + "csv_url": "https:/search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv" } ``` diff --git a/docs/developers/operations-api/analytics.md b/docs/developers/operations-api/analytics.md index 61ba6af9..d3880f0c 100644 --- a/docs/developers/operations-api/analytics.md +++ b/docs/developers/operations-api/analytics.md @@ -1,3 +1,7 @@ +--- +title: Analytics Operations +--- + # Analytics Operations ## get_analytics @@ -8,7 +12,7 @@ Retrieves analytics data from the server. * start_time _(optional)_ - Unix timestamp in seconds * end_time _(optional)_ - Unix timestamp in seconds * get_attributes _(optional)_ - array of attribute names to retrieve -* conditions _(optional)_ - array of conditions to filter results (see [search_by_conditions docs](docs/developers/operations-api/nosql-operations.md) for details) +* conditions _(optional)_ - array of conditions to filter results (see [search_by_conditions docs](developers/operations-api/nosql-operations) for details) ### Body diff --git a/docs/developers/operations-api/bulk-operations.md b/docs/developers/operations-api/bulk-operations.md index 5caad1c2..51801438 100644 --- a/docs/developers/operations-api/bulk-operations.md +++ b/docs/developers/operations-api/bulk-operations.md @@ -1,3 +1,7 @@ +--- +title: Bulk Operations +--- + # Bulk Operations ## Export Local @@ -52,7 +56,7 @@ Ingests CSV data, provided directly in the operation as an `insert`, `update` or "database": "dev", "action": "insert", "table": "breed", - "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" + "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http:/www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http:/www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" } ``` @@ -120,7 +124,7 @@ Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into th "action": "insert", "database": "dev", "table": "breed", - "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" + "csv_url": "https:/s3.amazonaws.com/complimentarydata/breeds.csv" } ``` diff --git a/docs/developers/operations-api/certificate-management.md b/docs/developers/operations-api/certificate-management.md index 219d0b26..b569dffc 100644 --- a/docs/developers/operations-api/certificate-management.md +++ b/docs/developers/operations-api/certificate-management.md @@ -1,3 +1,7 @@ +--- +title: Certificate Management +--- + # Certificate Management ## Add Certificate diff --git a/docs/developers/operations-api/clustering-nats.md b/docs/developers/operations-api/clustering-nats.md index 8c7fa2e4..a45c593e 100644 --- a/docs/developers/operations-api/clustering-nats.md +++ 
b/docs/developers/operations-api/clustering-nats.md @@ -1,3 +1,7 @@ +--- +title: Clustering using NATS +--- + # Clustering using NATS ## Cluster Set Routes @@ -154,7 +158,7 @@ _Operation is restricted to super_user roles only_ ## Add Node -Registers an additional Harper instance with associated subscriptions. Learn more about [Harper clustering here](../clustering/README.md). +Registers an additional Harper instance with associated subscriptions. Learn more about [Harper clustering here](../clustering/). _Operation is restricted to super_user roles only_ @@ -197,7 +201,7 @@ _Operation is restricted to super_user roles only_ ## Update Node -Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/README.md). +Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/). _Operation is restricted to super_user roles only_ @@ -281,7 +285,7 @@ _Operation is restricted to super_user roles only_ ## Cluster Status -Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [Harper clustering here](../clustering/README.md). +Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [Harper clustering here](../clustering/). _Operation is restricted to super_user roles only_ @@ -328,7 +332,7 @@ _Operation is restricted to super_user roles only_ ## Cluster Network -Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with and the routes set in its config file. Learn more about [Harper clustering here](../clustering/README.md). +Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with and the routes set in its config file. Learn more about [Harper clustering here](../clustering/). _Operation is restricted to super_user roles only_ @@ -375,7 +379,7 @@ _Operation is restricted to super_user roles only_ ## Remove Node -Removes a Harper instance and associated subscriptions from the cluster. Learn more about [Harper clustering here](../clustering/README.md). +Removes a Harper instance and associated subscriptions from the cluster. Learn more about [Harper clustering here](../clustering/). _Operation is restricted to super_user roles only_ @@ -404,7 +408,7 @@ _Operation is restricted to super_user roles only_ ## Configure Cluster Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup. -Learn more about [Harper clustering here](../clustering/README.md). +Learn more about [Harper clustering here](../clustering/). 
_Operation is restricted to super_user roles only_ diff --git a/docs/developers/operations-api/clustering.md b/docs/developers/operations-api/clustering.md index 5b78e2d1..26cd785f 100644 --- a/docs/developers/operations-api/clustering.md +++ b/docs/developers/operations-api/clustering.md @@ -1,8 +1,12 @@ +--- +title: Clustering +--- + # Clustering The following operations are available for configuring and managing [Harper replication](../replication/).\ -_**If you are using NATS for clustering, please see the**_ [_**NATS Clustering Operations**_](clustering-nats.md) _**documentation.**_ +_**If you are using NATS for clustering, please see the**_ [_**NATS Clustering Operations**_](clustering-nats) _**documentation.**_ ## Add Node @@ -147,7 +151,7 @@ _Operation is restricted to super_user roles only_ { "replicateByDefault": true, "replicates": true, - "url": "wss://server-2.domain.com:9933", + "url": "wss://server-2.domain.com:9933", "name": "server-2.domain.com", "subscriptions": null, "database_sockets": [ @@ -256,7 +260,7 @@ _Operation is restricted to super_user roles only_ { "operation": "cluster_set_routes", "routes": [ - "wss://server-two:9925", + "wss://server-two:9925", { "hostname": "server-three", "port": 9930 @@ -271,7 +275,7 @@ _Operation is restricted to super_user roles only_ { "message": "cluster routes successfully set", "set": [ - "wss://server-two:9925", + "wss://server-two:9925", { "hostname": "server-three", "port": 9930 @@ -303,7 +307,7 @@ _Operation is restricted to super_user roles only_ ```json [ - "wss://server-two:9925", + "wss://server-two:9925", { "hostname": "server-three", "port": 9930 diff --git a/docs/developers/operations-api/components.md b/docs/developers/operations-api/components.md index d1536d2f..6c3809ae 100644 --- a/docs/developers/operations-api/components.md +++ b/docs/developers/operations-api/components.md @@ -1,3 +1,7 @@ +--- +title: Components +--- + # Components ## Add Component @@ -35,10 +39,10 @@ Will deploy a component using either a base64-encoded string representation of a If deploying with the `payload` option, Harper will decrypt the base64-encoded string, reconstitute the .tar file of your project folder, and extract it to the component root project directory. -If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`. Then npm install will be utilized to install the component in the `node_modules` directory located in the hdb root. The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM registerd packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers. Here is an example URL package reference to a (public) Git repository that doesn't require authentication: +If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`. Then npm install will be utilized to install the component in the `node_modules` directory located in the hdb root. 
The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM registered packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers. Here is an example URL package reference to a (public) Git repository that doesn't require authentication: ``` -https://github.com/HarperDB/application-template#semver:v1.0.0 +https://github.com/HarperDB/application-template#semver:v1.0.0 ``` or this can be shortened to: @@ -50,24 +54,22 @@ HarperDB/application-template#semver:v1.0.0 You can also install from a private repository if you have installed SSH keys on the server. Ensure the `host` portion of the url exactly matches the `host` used when adding ssh keys to ensure proper authentication. ``` -git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0 - -git+ssh://git@harperdb-private-component.github.com:my-org/my-app.git#semver:v1.0.0 +git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0 ``` Or you can use a Github token: ``` -https://@github.com/my-org/my-app#semver:v1.0.0 +https://@github.com/my-org/my-app#semver:v1.0.0 ``` Or you can use a GitLab Project Access Token: ``` -https://my-project:@gitlab.com/my-group/my-project#semver:v1.0.0 +https://my-project:@gitlab.com/my-group/my-project#semver:v1.0.0 ``` -Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node_modules in the hdb root directory (typically `~/hdb/node_modules`). +Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node_modules in the hdb root directory (typically `~/hdb/node_modules`). 
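+For instance, a `deploy_component` request using one of the Git references above might look like the following sketch (the project name is hypothetical): + +```json +{ + "operation": "deploy_component", + "project": "my-app", + "package": "HarperDB/application-template#semver:v1.0.0" +} +```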
_Note: After deploying a component, a restart may be required_ @@ -214,7 +216,7 @@ _Operation is restricted to super_user roles only_ "size": 1070 }, { - "name": "README.md", + "name": "index.md", "mtime": "2023-08-22T16:00:40.287Z", "size": 1207 }, @@ -281,7 +283,7 @@ _Operation is restricted to super_user roles only_ ```json { - "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}" + "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}" } ``` @@ -367,9 +369,9 @@ Host harperdb-private-component.github.com ``` ``` -"package": "git+ssh://git@:.git#semver:v1.2.3" +"package": "git+ssh://git@:.git#semver:v1.2.3" -"package": "git+ssh://git@harperdb-private-component.github.com:HarperDB/harperdb-private-component.git#semver:v1.2.3" +"package": "git+ssh://git@harperdb-private-component.github.com:HarperDB/harperdb-private-component.git#semver:v1.2.3" ``` Note that `deploy_component` with a package uses `npm install` so the url must be a valid npm format url. The above is an example of a url using a tag in the repo to install. @@ -529,7 +531,7 @@ _Operation is restricted to super_user roles only_ - operation _(required)_ - must always be `install_node_modules` - projects _(required)_ - must be an array of custom functions projects. -- dry*run *(optional)\_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false. +- dry_run _(optional)_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false. 
### Body diff --git a/docs/developers/operations-api/configuration.md b/docs/developers/operations-api/configuration.md index f4f2018f..c48381ab 100644 --- a/docs/developers/operations-api/configuration.md +++ b/docs/developers/operations-api/configuration.md @@ -1,3 +1,7 @@ +--- +title: Configuration +--- + # Configuration ## Set Configuration @@ -73,7 +77,7 @@ _Operation is restricted to super_user roles only_ "hostname": "node1", "databases": "*", "routes": null, - "url": "wss://127.0.0.1:9925" + "url": "wss://127.0.0.1:9925" }, "componentsRoot": "/Users/hdb/components", "localStudio": { diff --git a/docs/developers/operations-api/custom-functions.md b/docs/developers/operations-api/custom-functions.md index 7ebffc6e..ed31785a 100644 --- a/docs/developers/operations-api/custom-functions.md +++ b/docs/developers/operations-api/custom-functions.md @@ -1,3 +1,7 @@ +--- +title: Custom Functions +--- + # Custom Functions _These operations are deprecated._ @@ -160,7 +164,7 @@ _Operation is restricted to super_user roles only_ ## Add Custom Function Project -Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is available publicly, here: https://github.com/HarperDB/harperdb-custom-functions-template. +Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is available publicly, here: https://github.com/HarperDB/harperdb-custom-functions-template. _Operation is restricted to super_user roles only_ diff --git a/docs/developers/operations-api/databases-and-tables.md b/docs/developers/operations-api/databases-and-tables.md index e01731a9..936425c3 100644 --- a/docs/developers/operations-api/databases-and-tables.md +++ b/docs/developers/operations-api/databases-and-tables.md @@ -1,3 +1,7 @@ +--- +title: Databases and Tables +--- + # Databases and Tables ## Describe All diff --git a/docs/developers/operations-api/index.md b/docs/developers/operations-api/index.md new file mode 100644 index 00000000..1c2fb3ab --- /dev/null +++ b/docs/developers/operations-api/index.md @@ -0,0 +1,55 @@ +--- +title: Operations API +--- + +# Operations API + +The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling Harper. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../deployments/configuration#operationsapi), on the root path, where the body is the operations object. These requests need to be authenticated, which can be done with [basic auth](../security/basic-auth) or [JWT authentication](../security/jwt-auth). 
For example, a request to create a table would be performed as: + +```http +POST http://my-harperdb-server:9925/ +Authorization: Basic YourBase64EncodedInstanceUser:Pass +Content-Type: application/json + +{ + "operation": "create_table", + "table": "my-table" +} +``` + +The operations API reference is available below and categorized by topic: + +* [Quick Start Examples](quickstart-examples) +* [Databases and Tables](databases-and-tables) +* [NoSQL Operations](nosql-operations) +* [Bulk Operations](bulk-operations) +* [Users and Roles](users-and-roles) +* [Clustering](clustering) +* [Clustering with NATS](clustering-nats) +* [Components](components) +* [Registration](registration) +* [Jobs](jobs) +* [Logs](logs) +* [System Operations](system-operations) +* [Configuration](configuration) +* [Certificate Management](certificate-management) +* [Token Authentication](token-authentication) +* [SQL Operations](sql-operations) +* [Advanced JSON SQL Examples](advanced-json-sql-examples) +* [Analytics](analytics) + +* [Past Release API Documentation](https://olddocs.harperdb.io) + +## More Examples + +Here is an example of using `curl` to make an operations API request: + +```bash +curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \ +--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \ +--header 'Content-Type: application/json' \ +--data-raw '{ +"operation": "create_schema", +"schema": "dev" +}' +``` diff --git a/docs/developers/operations-api/jobs.md b/docs/developers/operations-api/jobs.md index 0240ae4a..173125a1 100644 --- a/docs/developers/operations-api/jobs.md +++ b/docs/developers/operations-api/jobs.md @@ -1,3 +1,7 @@ +--- +title: Jobs +--- + # Jobs ## Get Job diff --git a/docs/developers/operations-api/logs.md b/docs/developers/operations-api/logs.md index 2f7e9630..4efeb27f 100644 --- a/docs/developers/operations-api/logs.md +++ b/docs/developers/operations-api/logs.md @@ -1,8 +1,12 @@ +--- +title: Logs +--- + # Logs ## Read Harper Log -Returns log outputs from the primary Harper log based on the provided search criteria. [Read more about Harper logging here](../../administration/logging/logging.md#read-logs-via-the-api). +Returns log outputs from the primary Harper log based on the provided search criteria. [Read more about Harper logging here](../../administration/logging/standard-logging#read-logs-via-the-api). _Operation is restricted to super_user roles only_ @@ -60,7 +64,7 @@ _Operation is restricted to super_user roles only_ ## Read Transaction Log -Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. [Read more about Harper transaction logs here](logs.md#read-transaction-log). +Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. [Read more about Harper transaction logs here](logs#read-transaction-log). _Operation is restricted to super_user roles only_ @@ -295,7 +299,7 @@ _Operation is restricted to super_user roles only_ ## Read Audit Log -AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search_type and search_values fields.
[Read more about Harper transaction logs here.](../../administration/logging/transaction-logging.md#read_transaction_log) +AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search_type and search_values fields. [Read more about Harper transaction logs here.](../../administration/logging/transaction-logging#read_transaction_log) _Operation is restricted to super_user roles only_ @@ -390,7 +394,7 @@ _Operation is restricted to super_user roles only_ ## Read Audit Log by timestamp -AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table between the specified time window. [Read more about Harper transaction logs here](logs.md#read-transaction-log). +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table between the specified time window. [Read more about Harper transaction logs here](logs#read-transaction-log). _Operation is restricted to super_user roles only_ @@ -511,7 +515,7 @@ _Operation is restricted to super_user roles only_ ## Read Audit Log by username -AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging.md#read_transaction_log). +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). _Operation is restricted to super_user roles only_ @@ -631,7 +635,7 @@ _Operation is restricted to super_user roles only_ ## Read Audit Log by hash_value -AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). [Read more about Harper transaction logs here](../../administration/logging/transaction-logging.md#read_transaction_log). +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
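For instance, a by-hash request could be shaped like the sketch below (an illustration only: the `dev.dog` table and id values are hypothetical placeholders, borrowed from the quick start examples):

```json
{
	"operation": "read_audit_log",
	"schema": "dev",
	"table": "dog",
	"search_type": "hash_value",
	"search_values": [1, 2]
}
```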
_Operation is restricted to super_user roles only_ diff --git a/docs/developers/operations-api/nosql-operations.md b/docs/developers/operations-api/nosql-operations.md index 0b8b7d7f..099ebbcd 100644 --- a/docs/developers/operations-api/nosql-operations.md +++ b/docs/developers/operations-api/nosql-operations.md @@ -1,3 +1,7 @@ +--- +title: NoSQL Operations +--- + # NoSQL Operations ## Insert diff --git a/docs/developers/operations-api/quickstart-examples.md b/docs/developers/operations-api/quickstart-examples.md index 74e46469..9d60c002 100644 --- a/docs/developers/operations-api/quickstart-examples.md +++ b/docs/developers/operations-api/quickstart-examples.md @@ -1,6 +1,10 @@ +--- +title: Quick Start Examples +--- + # Quick Start Examples -Harper recommends utilizing [Harper Applications](../../developers/applications/README.md) for defining databases, tables, and other functionality. However, this guide is a great way to get started using on the Harper Operations API. +Harper recommends utilizing [Harper Applications](../../developers/applications/) for defining databases, tables, and other functionality. However, this guide is a great way to get started using the Harper Operations API. ## Create dog Table @@ -230,7 +234,7 @@ Let's add some more Harper doggies! We can add as many dog objects as we want in We need to populate the 'breed' table with some data so we can reference it later. For larger data sets, we recommend using our CSV upload option. Each column header will be considered as an attribute, and each row in the file will be a row in the table. Simply specify the file path and the table to upload to, and Harper will take care of the rest. You can pull the breeds.csv file from here: https://s3.amazonaws.com/complimentarydata/breeds.csv ### Body ```json { "operation": "csv_url_load", "table": "breed", "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" } ``` diff --git a/docs/developers/operations-api/registration.md b/docs/developers/operations-api/registration.md index 99792e81..cd7bc61f 100644 --- a/docs/developers/operations-api/registration.md +++ b/docs/developers/operations-api/registration.md @@ -1,3 +1,7 @@ +--- +title: Registration +--- + # Registration ## Registration Info diff --git a/docs/developers/operations-api/sql-operations.md b/docs/developers/operations-api/sql-operations.md index 1069ce19..71dfa436 100644 --- a/docs/developers/operations-api/sql-operations.md +++ b/docs/developers/operations-api/sql-operations.md @@ -1,6 +1,10 @@ -{% hint style="warning" %} +--- +title: SQL Operations +--- + +:::warning Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %} +::: # SQL Operations diff --git a/docs/developers/operations-api/system-operations.md b/docs/developers/operations-api/system-operations.md index 8252c19d..da47e104 100644 --- a/docs/developers/operations-api/system-operations.md +++ b/docs/developers/operations-api/system-operations.md @@ -1,3 +1,7 @@ +--- +title: System Operations +--- + # System Operations ## Restart diff --git a/docs/developers/operations-api/token-authentication.md b/docs/developers/operations-api/token-authentication.md index 26220051..b9ff5b31 100644 --- a/docs/developers/operations-api/token-authentication.md +++ b/docs/developers/operations-api/token-authentication.md @@ -1,3 +1,7 @@ +--- +title: Token Authentication +--- + # Token Authentication ## Create Authentication Tokens diff --git a/docs/developers/operations-api/users-and-roles.md b/docs/developers/operations-api/users-and-roles.md index 0326038a..ecaa1117 100644 --- a/docs/developers/operations-api/users-and-roles.md +++ b/docs/developers/operations-api/users-and-roles.md @@ -1,8 +1,12 @@ +--- +title: Users and Roles +--- + # Users and Roles ## List Roles -Returns a list of all roles. [Learn more about Harper roles here.](../security/users-and-roles.md) +Returns a list of all roles. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -72,7 +76,7 @@ _Operation is restricted to super_user roles only_ ## Add Role -Creates a new role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles.md) +Creates a new role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -150,7 +154,7 @@ _Operation is restricted to super_user roles only_ ## Alter Role -Modifies an existing role with the specified permissions. updates permissions from an existing role. [Learn more about Harper roles here.](../security/users-and-roles.md) +Modifies an existing role, updating its permissions to those specified. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -229,7 +233,7 @@ _Operation is restricted to super_user roles only_ ## Drop Role -Deletes an existing role from the database. NOTE: Role with associated users cannot be dropped. [Learn more about Harper roles here.](../security/users-and-roles.md) +Deletes an existing role from the database. NOTE: A role with associated users cannot be dropped. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -257,7 +261,7 @@ _Operation is restricted to super_user roles only_ ## List Users -Returns a list of all users. [Learn more about Harper roles here.](../security/users-and-roles.md) +Returns a list of all users. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -407,7 +411,7 @@ Returns user data for the associated user credentials. ## Add User -Creates a new user with the specified role and credentials. [Learn more about Harper roles here.](../security/users-and-roles.md) +Creates a new user with the specified role and credentials.
[Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -441,7 +445,7 @@ _Operation is restricted to super_user roles only_ ## Alter User -Modifies an existing user's role and/or credentials. [Learn more about Harper roles here.](../security/users-and-roles.md) +Modifies an existing user's role and/or credentials. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ @@ -479,7 +483,7 @@ _Operation is restricted to super_user roles only_ ## Drop User -Deletes an existing user by username. [Learn more about Harper roles here.](../security/users-and-roles.md) +Deletes an existing user by username. [Learn more about Harper roles here.](../security/users-and-roles) _Operation is restricted to super_user roles only_ diff --git a/docs/developers/real-time.md b/docs/developers/real-time.md index d38ca74c..03853ae6 100644 --- a/docs/developers/real-time.md +++ b/docs/developers/real-time.md @@ -1,10 +1,14 @@ +--- +title: Real-Time +--- + # Real-Time ## Real-Time Harper provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time as well as handling data-oriented messaging. Harper supports multiple standardized protocols to facilitate diverse standards-based client interaction. -Harper real-time communication is based around database tables. Declared tables are the basis for monitoring data, and defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a Harper application folder](./applications/README.md): +Harper real-time communication is based around database tables. Declared tables are the basis for monitoring data, and defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a Harper application folder](./applications/): ``` type MyTopic @table @export @@ -36,9 +40,7 @@ mqtt: requireAuthentication: true ``` -Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`. - -mTLS is also supported by enabling it in the configuration and using the certificate authority from the TLS section of the configuration. When mTLS is enabled for MQTT, Harper automatically performs certificate revocation checking using OCSP (Online Certificate Status Protocol) to ensure that revoked certificates cannot be used for authentication. See the [configuration documentation for more information](../deployments/configuration.md). +Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`. mTLS is also supported by enabling it in the configuration and using the certificate authority from the TLS section of the configuration. See the [configuration documentation for more information](../deployments/configuration). #### Capabilities @@ -89,12 +91,12 @@ Non-retained messages are generally a good choice for applications like chat, wh ### WebSockets -WebSockets are supported through the REST interface and go through the `connect(incomingMessages)` method on resources. 
By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection to `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' and the resource id of 341 and connect to it. On the web platform this could be: +WebSockets are supported through the REST interface and go through the `connect(incomingMessages)` method on resources. By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection to `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' and the resource id of 341 and connect to it. On the web platform this could be: ```javascript let ws = new WebSocket('wss://server/my-resource/341'); ws.onmessage = (event) => { // received a notification from the server let data = JSON.parse(event.data); }; ``` @@ -104,8 +106,8 @@ By default, the resources will make a subscription to that resource, monitoring ```javascript export class Echo extends Resource { async *connect(incomingMessages) { for await (let message of incomingMessages) { // wait for each incoming message from the client // and send the message back to the client yield message; } } @@ -119,13 +121,13 @@ export class Example extends Resource { let outgoingMessages = super.connect(); let timer = setInterval(() => { outgoingMessages.send({greeting: 'hi again!'}); }, 1000); // send a message once a second incomingMessages.on('data', (message) => { // another way of echo-ing the data back to the client outgoingMessages.send(message); }); outgoingMessages.on('close', () => { // make sure we end the timer once the connection is closed clearInterval(timer); }); return outgoingMessages; @@ -137,9 +139,9 @@ Server Sent Events (SSE) are also supported through the REST server interface, and provide a simple and efficient mechanism for web-based applications to receive real-time updates. For consistency of push delivery, SSE connections go through the `connect()` method on resources, much like WebSockets. The primary difference is that `connect` is called without any `incomingMessages` argument, since SSE is a one-directional transport mechanism. This can be used much like WebSockets, specifying a resource URL path will connect to that resource, and by default provides a stream of messages for changes and messages for that resource.
For example, you can connect to receive notifications in a browser for a resource like: ```javascript let eventSource = new EventSource('https://server/my-resource/341', { withCredentials: true }); eventSource.onmessage = (event) => { // received a notification from the server let data = JSON.parse(event.data); }; ``` diff --git a/docs/developers/replication/README.md b/docs/developers/replication/README.md deleted file mode 100644 index ed498e55..00000000 --- a/docs/developers/replication/README.md +++ /dev/null @@ -1,278 +0,0 @@ -# Replication/Clustering - -Harper’s replication system is designed to make distributed data replication fast and reliable across multiple nodes. This means you can easily build a distributed database that ensures high availability, disaster recovery, and data localization. The best part? It’s simple to set up, configure, and manage. You can easily add or remove nodes, choose which data to replicate, and monitor the system’s health without jumping through hoops. - -### Replication Overview - -Harper replication uses a peer-to-peer model where every node in your cluster can send and subscribe to data. Each node connects through WebSockets, allowing data to flow seamlessly in both directions. By default, Harper takes care of managing these connections and subscriptions, so you don’t have to worry about data consistency. The system is designed to maintain secure, reliable connections between nodes, ensuring that your data is always safe. - -### Replication Configuration - -To connect your nodes, you need to provide hostnames or URLs for the nodes to connect to each other. This can be done via configuration or through operations. To configure replication, you can specify connection information the `replication` section of the [harperdb-config.yaml](../../deployments/configuration.md). Here, you can specify the host name of the current node, and routes to connect to other nodes, for example: - -```yaml -replication: - hostname: server-one - routes: - - server-two - - server-three -``` - -In this example, the current node is `server-one`, and it will connect to `server-two` and `server-three`. Routes to other nodes can also be configured with URLs or ports: - -```yaml -replication: - hostname: server-one - routes: - - wss://server-two:9933 # URL based route - - hostname: server-three # define a hostname and port - port: 9933 -``` - -You can also use the [operations API](../operations-api/clustering.md) to dynamically add and remove nodes from the cluster. This is useful for adding new nodes to a running cluster or removing nodes that are no longer needed. For example (note this is the basic form, you would also need to provide the necessary credentials for the operation, see the section on securing connections for more details): - -```json -{ - "operation": "add_node", - "hostname": "server-two" -} -``` - -These operations will also dynamically generating certificates as needed, if there are no existing signed certificates, or if the existing certificates are not valid for the new node. - -Harper will also automatically replicate node information to other nodes in a cluster ([gossip-style discovery](https://highscalability.com/gossip-protocol-explained/)). This means that you only need to connect to one node in an existing cluster, and Harper will automatically detect and connect to other nodes in the cluster (bidirectionally).
- -By default, Harper will replicate all the data in all the databases. You can configure which databases are replicated, and then override this behavior on a per-table basis. For example, you can indicate which databases should be replicated by default, here indicating you want to replicate the `data` and `system` databases: - -```yaml -replication: - databases: - - data - - system -``` - -By default, all tables within a replicated database will be replicated. Transactions are replicated atomically, which may involve data across multiple tables. However, you can also configure replication for individual tables, and disable and exclude replication for specific tables in a database by setting `replicate` to `false` in the table definition: - -```graphql -type LocalTableForNode @table(replicate: false) { - id: ID! - name: String! -} -``` - -You can also control which nodes data is replicated to, and how many nodes data is replicated to. By default, Harper will replicate data to all nodes in the cluster, but you can control where data is replicated to with the [sharding configuration and APIs](sharding.md). - -By default, replication connects to the secure port 9933. You can configure the replication port in the `replication` section. - -```yaml -replication: - securePort: 9933 -``` - -### Securing Connections - -Harper supports the highest levels of security through public key infrastructure based security and authorization. Depending on your security configuration, you can configure Harper in several different ways to build a connected cluster. - -When using certificate-based authentication, Harper automatically performs OCSP (Online Certificate Status Protocol) verification to check if certificates have been revoked. This ensures that compromised certificates cannot be used for replication connections. Certificate verification settings follow the same configuration as HTTP mTLS connections (see [certificate verification configuration](../../deployments/configuration.md#http)). - -#### Provide your own certificates - -If you want to secure your Harper connections with your own signed certificates, you can easily do so. Whether you have certificates from a public authority (like Let's Encrypt or Digicert) or a corporate certificate authority, you can use them to authenticate nodes securely. You can then allow nodes to authorize each other by checking the certificate against the standard list of root certificate authorities by enabling the `enableRootCAs` option in the config: - -``` -replication - enableRootCAs: true -``` - -And then just make sure the certificate’s common name (CN) matches the node's hostname. - -#### Setting Up Custom Certificates - -There are two ways to configure Harper with your own certificates: - -1. Use the `add_certificate` operation to upload them. -2. Or, specify the certificate paths directly in the `replication` section of the `harperdb-config.yaml` file. - -If your certificate is signed by a trusted public authority, just provide the path to the certificate and private key. If you're using self-signed certificates or a private certificate authority, you’ll also need to provide the certificate authority (CA) details to complete the setup.\ -\ -Example configuration: - -```yaml -tls: - certificate: /path/to/certificate.pem - certificateAuthority: /path/to/ca.pem - privateKey: /path/to/privateKey.pem -``` - -With this in place, Harper will load the provided certificates into the certificate table and use these to secure and authenticate connections between nodes. 
- -You have the option to skip providing a specific certificate authority (CA) and instead verify your certificate against the root certificates included in the bundled Mozilla CA store. This bundled CA store, provided by Node.js, is a snapshot of Mozilla's CA certificates that is fixed at the time of each Node.js release. - -To enable the root certificates set `replication.enableRootCAs` to `true` in the `harperdb-config.yaml` file: - -```yaml -replication: - enableRootCAs: true -``` - -#### Cross-generated certificates - -Harper can also generate its own certificates for secure connections. This is useful for setting up secure connections between nodes when no existing certificates are available, and can be used in development, testing, or production environments. Certificates will be automatically requested and signed between nodes to support a form of distributed certificate generation and signing. To establish secure connections between nodes using cross-generated certificates, you simply use the [`add_node` operation](../operations-api/clustering.md) over SSL, and specify the temporary authentication credentials to use for connecting and authorizing the certificate generation and signing. \ -\ -Example configuration: - -```json -{ - "operation": "add_node", - "hostname": "server-two", - "verify_tls": false, - "authorization": { - "username": "admin", - "password": "password" - } -} -``` - -When you connect to another node (e.g., `server-two`), Harper uses secure WebSockets and the provided credentials to establish the connection. - -If you’re working with a fresh install, you’ll need to set `verify_tls` to `false` temporarily, so the self-signed certificate is accepted. Once the connection is made, Harper will automatically handle the certificate signing process: - -- It creates a certificate signing request (CSR), sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA). -- The signed certificate is stored for future connections between the nodes, ensuring secure communication. - -**Important:** Your credentials are not stored—they are discarded immediately after use. - -You can also provide credentials in HTTP Authorization format (Basic auth, Token auth, or JWT). This is helpful for handling authentication with the required permissions to generate and sign certificates. - -Additionally, you can use `set_node` as an alias for the `add_node` operation if you prefer. - -#### Revoking Certificates - -Certificates used in replication can be revoked by using the certificate serial number and either the `revoked_certificates` attribute in the `hdb_nodes` system table or route config in `harperdb-config.yaml`. - -To utilize the `revoked_certificates` attribute in the `hdb_nodes` table, you can use the `add_node` or `update_node` operation to add the certificate serial number to the `revoked_certificates` array. For example: - -```json -{ - "operation": "update_node", - "hostname": "server-two", - "revoked_certificates": ["1769F7D6A"] -} -``` - -To utilize the replication route config in `harperdb-config.yaml`, you can add the certificate serial number to the `revokedCertificates` array. For example: - -```yaml -replication: - routes: - - hostname: server-three - port: 9930 - revokedCertificates: - - 1769F7D6A - - QA69C7E2S -``` - -#### Removing Nodes - -Nodes can be removed from the cluster using the [`remove_node` operation](../operations-api/clustering.md). 
This will remove the node from the cluster, and stop replication to and from the node. For example: - -```json -{ - "operation": "remove_node", - "hostname": "server-two" -} -``` - -#### Insecure Connection IP-based Authentication - -You can completely disable secure connections and use IP addresses to authenticate nodes with each other. This can be useful for development and testing, or within a secure private network, but should never be used for production with publicly accessible servers. To disable secure connections, simply configure replication within an insecure port, either by [configuring the operations API](../../deployments/configuration.md) to run on an insecure port or replication to run on an insecure port. And then set up IP-based routes to connect to other nodes: - -```yaml -replication: - port: 9933 - routes: - - 127.0.0.2 - - 127.0.0.3 -``` - -Note that in this example, we are using loop back addresses, which can be a convenient way to run multiple nodes on a single machine for testing and development. - -#### Explicit Subscriptions - -#### Managing Node Connections and Subscriptions in Harper - -By default, Harper automatically handles connections and subscriptions between nodes, ensuring data consistency across your cluster. It even uses data routing to manage node failures. But if you want more control, you can manage these connections manually by explicitly subscribing to nodes. This is useful for advanced configurations, testing, or debugging. - -#### Important Notes on Explicit Subscriptions - -If you choose to manage subscriptions manually, Harper will no longer handle data consistency for you. This means there’s no guarantee that all nodes will have consistent data if subscriptions don’t fully replicate in all directions. If a node goes down, it’s possible that some data wasn’t replicated before the failure. - -#### How to Subscribe to Nodes - -To explicitly subscribe to a node, you can use operations like `add_node` and define the subscriptions. For example, you can configure a node (e.g., `server-two`) to publish transactions on a specific table (e.g., `dev.my-table`) without receiving data from that node. - -Example configuration: - -```json -{ - "operation": "add_node", - "hostname": "server-two", - "subscriptions": [ - { - "database": "dev", - "table": "my-table", - "publish": true, - "subscribe": false - } - ] -} -``` - -To update an explicit subscription you can use the [`update_node` operation](../operations-api/clustering.md). - -Here we are updating the subscription to receive transactions on the `dev.my-table` table from the `server-two` node. - -```json -{ - "operation": "update_node", - "hostname": "server-two", - "subscriptions": [ - { - "database": "dev", - "table": "my-table", - "publish": true, - "subscribe": true - } - ] -} -``` - -#### Monitoring Replication - -You can monitor the status of replication through the operations API. You can use the [`cluster_status` operation](../operations-api/clustering.md) to get the status of replication. For example: - -```json -{ - "operation": "cluster_status" -} -``` - -#### Database Initial Synchronization and Resynchronization - -When a new node is added to the cluster, if its database has not previously been synced, it will initially download the database from the first node it connects to. This will copy every record from the source database to the new node. 
Once the initial synchronization is complete, the new node will enter replication mode and receive records from each node as they are created, updated, or deleted. If a node goes down and comes back up, it will also resynchronize with the other nodes in the cluster, to ensure that it has the most up-to-date data. - -You may also specify a `start_time` in the `add_node` to specify that when a database connects, that it should not download the entire database, but only data since a given starting time. - -**Advanced Configuration** - -You can also check the configuration of the replication system, including the current known nodes and certificates, by querying the hdb_nodes and hdb_certificate table: - -```json -{ - "operation": "search_by_value", - "database": "system", - "table": "hdb_nodes", - "search_attribute": "name", - "search_value": "*" -} -``` diff --git a/docs/developers/replication/index.md b/docs/developers/replication/index.md new file mode 100644 index 00000000..6f30c72c --- /dev/null +++ b/docs/developers/replication/index.md @@ -0,0 +1,282 @@ +--- +title: Replication/Clustering +--- + +# Replication/Clustering + +Harper’s replication system is designed to make distributed data replication fast and reliable across multiple nodes. This means you can easily build a distributed database that ensures high availability, disaster recovery, and data localization. The best part? It’s simple to set up, configure, and manage. You can easily add or remove nodes, choose which data to replicate, and monitor the system’s health without jumping through hoops. + +### Replication Overview + +Harper replication uses a peer-to-peer model where every node in your cluster can send and subscribe to data. Each node connects through WebSockets, allowing data to flow seamlessly in both directions. By default, Harper takes care of managing these connections and subscriptions, so you don’t have to worry about data consistency. The system is designed to maintain secure, reliable connections between nodes, ensuring that your data is always safe. + +### Replication Configuration + +To connect your nodes, you need to provide hostnames or URLs for the nodes to connect to each other. This can be done via configuration or through operations. To configure replication, you can specify connection information in the `replication` section of the [harperdb-config.yaml](../../deployments/configuration). Here, you can specify the host name of the current node, and routes to connect to other nodes, for example: + +```yaml +replication: + hostname: server-one + routes: + - server-two + - server-three +``` + +In this example, the current node is `server-one`, and it will connect to `server-two` and `server-three`. Routes to other nodes can also be configured with URLs or ports: + +```yaml +replication: + hostname: server-one + routes: + - wss://server-two:9933 # URL based route + - hostname: server-three # define a hostname and port + port: 9933 +``` + +You can also use the [operations API](../operations-api/clustering) to dynamically add and remove nodes from the cluster. This is useful for adding new nodes to a running cluster or removing nodes that are no longer needed.
For example (note this is the basic form, you would also need to provide the necessary credentials for the operation, see the section on securing connections for more details): + +```json +{ + "operation": "add_node", + "hostname": "server-two" +} +``` + +These operations will also dynamically generate certificates as needed, if there are no existing signed certificates or if the existing certificates are not valid for the new node. + +Harper will also automatically replicate node information to other nodes in a cluster ([gossip-style discovery](https://highscalability.com/gossip-protocol-explained/)). This means that you only need to connect to one node in an existing cluster, and Harper will automatically detect and connect to other nodes in the cluster (bidirectionally). + +By default, Harper will replicate all the data in all the databases. You can configure which databases are replicated, and then override this behavior on a per-table basis. For example, you can indicate which databases should be replicated by default, here indicating you want to replicate the `data` and `system` databases: + +```yaml +replication: + databases: + - data + - system +``` + +By default, all tables within a replicated database will be replicated. Transactions are replicated atomically, which may involve data across multiple tables. However, you can also configure replication for individual tables, and disable and exclude replication for specific tables in a database by setting `replicate` to `false` in the table definition: + +```graphql +type LocalTableForNode @table(replicate: false) { + id: ID! + name: String! +} +``` + +You can also control which nodes data is replicated to, and how many nodes data is replicated to. By default, Harper will replicate data to all nodes in the cluster, but you can control where data is replicated to with the [sharding configuration and APIs](sharding). + +By default, replication connects to the secure port 9933. You can configure the replication port in the `replication` section. + +```yaml +replication: + securePort: 9933 +``` + +### Securing Connections + +Harper supports the highest levels of security through public key infrastructure (PKI) based authentication and authorization. Depending on your security configuration, you can configure Harper in several different ways to build a connected cluster. + +When using certificate-based authentication, Harper automatically performs OCSP (Online Certificate Status Protocol) verification to check if certificates have been revoked. This ensures that compromised certificates cannot be used for replication connections. Certificate verification settings follow the same configuration as HTTP mTLS connections (see [certificate verification configuration](../../deployments/configuration#http)). + +#### Provide your own certificates + +If you want to secure your Harper connections with your own signed certificates, you can easily do so. Whether you have certificates from a public authority (like Let's Encrypt or Digicert) or a corporate certificate authority, you can use them to authenticate nodes securely. You can then allow nodes to authorize each other by checking the certificate against the standard list of root certificate authorities by enabling the `enableRootCAs` option in the config: + +``` +replication: + enableRootCAs: true +``` + +And then just make sure the certificate’s common name (CN) matches the node's hostname.
+ +#### Setting Up Custom Certificates + +There are two ways to configure Harper with your own certificates: + +1. Use the `add_certificate` operation to upload them. +2. Or, specify the certificate paths directly in the `replication` section of the `harperdb-config.yaml` file. + +If your certificate is signed by a trusted public authority, just provide the path to the certificate and private key. If you're using self-signed certificates or a private certificate authority, you’ll also need to provide the certificate authority (CA) details to complete the setup. + +Example configuration: + +```yaml +tls: + certificate: /path/to/certificate.pem + certificateAuthority: /path/to/ca.pem + privateKey: /path/to/privateKey.pem +``` + +With this in place, Harper will load the provided certificates into the certificate table and use these to secure and authenticate connections between nodes. + +You have the option to skip providing a specific certificate authority (CA) and instead verify your certificate against the root certificates included in the bundled Mozilla CA store. This bundled CA store, provided by Node.js, is a snapshot of Mozilla's CA certificates that is fixed at the time of each Node.js release. + +To enable the root certificates, set `replication.enableRootCAs` to `true` in the `harperdb-config.yaml` file: + +```yaml +replication: + enableRootCAs: true +``` + +#### Cross-generated certificates + +Harper can also generate its own certificates for secure connections. This is useful for setting up secure connections between nodes when no existing certificates are available, and can be used in development, testing, or production environments. Certificates will be automatically requested and signed between nodes to support a form of distributed certificate generation and signing. To establish secure connections between nodes using cross-generated certificates, you simply use the [`add_node` operation](../operations-api/clustering) over SSL, and specify the temporary authentication credentials to use for connecting and authorizing the certificate generation and signing. + +Example configuration: + +```json +{ + "operation": "add_node", + "hostname": "server-two", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password" + } +} +``` + +When you connect to another node (e.g., `server-two`), Harper uses secure WebSockets and the provided credentials to establish the connection. + +If you’re working with a fresh install, you’ll need to set `verify_tls` to `false` temporarily, so the self-signed certificate is accepted. Once the connection is made, Harper will automatically handle the certificate signing process: + +- It creates a certificate signing request (CSR), sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA). +- The signed certificate is stored for future connections between the nodes, ensuring secure communication. + +**Important:** Your credentials are not stored—they are discarded immediately after use. + +You can also provide credentials in HTTP Authorization format (Basic auth, Token auth, or JWT). This is helpful for handling authentication with the required permissions to generate and sign certificates. + +Additionally, you can use `set_node` as an alias for the `add_node` operation if you prefer.
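Because `set_node` is an alias, the same connection request can be issued under that name; a minimal sketch mirroring the `add_node` example above:

```json
{
	"operation": "set_node",
	"hostname": "server-two",
	"verify_tls": false,
	"authorization": {
		"username": "admin",
		"password": "password"
	}
}
```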
+ +#### Revoking Certificates + +Certificates used in replication can be revoked by using the certificate serial number and either the `revoked_certificates` attribute in the `hdb_nodes` system table or route config in `harperdb-config.yaml`. + +To utilize the `revoked_certificates` attribute in the `hdb_nodes` table, you can use the `add_node` or `update_node` operation to add the certificate serial number to the `revoked_certificates` array. For example: + +```json +{ + "operation": "update_node", + "hostname": "server-two", + "revoked_certificates": ["1769F7D6A"] +} +``` + +To utilize the replication route config in `harperdb-config.yaml`, you can add the certificate serial number to the `revokedCertificates` array. For example: + +```yaml +replication: + routes: + - hostname: server-three + port: 9930 + revokedCertificates: + - 1769F7D6A + - QA69C7E2S +``` + +#### Removing Nodes + +Nodes can be removed from the cluster using the [`remove_node` operation](../operations-api/clustering). This will remove the node from the cluster, and stop replication to and from the node. For example: + +```json +{ + "operation": "remove_node", + "hostname": "server-two" +} +``` + +#### Insecure Connection IP-based Authentication + +You can completely disable secure connections and use IP addresses to authenticate nodes with each other. This can be useful for development and testing, or within a secure private network, but should never be used for production with publicly accessible servers. To disable secure connections, simply configure replication on an insecure port, either by [configuring the operations API](../../deployments/configuration) to run on an insecure port or by running replication itself on an insecure port. Then set up IP-based routes to connect to other nodes: + +```yaml +replication: + port: 9933 + routes: + - 127.0.0.2 + - 127.0.0.3 +``` + +Note that in this example, we are using loopback addresses, which can be a convenient way to run multiple nodes on a single machine for testing and development. + +#### Explicit Subscriptions: Managing Node Connections and Subscriptions + +By default, Harper automatically handles connections and subscriptions between nodes, ensuring data consistency across your cluster. It even uses data routing to manage node failures. But if you want more control, you can manage these connections manually by explicitly subscribing to nodes. This is useful for advanced configurations, testing, or debugging. + +#### Important Notes on Explicit Subscriptions + +If you choose to manage subscriptions manually, Harper will no longer handle data consistency for you. This means there’s no guarantee that all nodes will have consistent data if subscriptions don’t fully replicate in all directions. If a node goes down, it’s possible that some data wasn’t replicated before the failure. + +#### How to Subscribe to Nodes + +To explicitly subscribe to a node, you can use operations like `add_node` and define the subscriptions. For example, you can configure a node (e.g., `server-two`) to publish transactions on a specific table (e.g., `dev.my-table`) without receiving data from that node. + +Example configuration: + +```json +{ + "operation": "add_node", + "hostname": "server-two", + "subscriptions": [ + { + "database": "dev", + "table": "my-table", + "publish": true, + "subscribe": false + } + ] +} +``` + +To update an explicit subscription you can use the [`update_node` operation](../operations-api/clustering).
+ +Here we are updating the subscription to receive transactions on the `dev.my-table` table from the `server-two` node. + +```json +{ + "operation": "update_node", + "hostname": "server-two", + "subscriptions": [ + { + "database": "dev", + "table": "my-table", + "publish": true, + "subscribe": true + } + ] +} +``` + +#### Monitoring Replication + +You can monitor the status of replication through the operations API, using the [`cluster_status` operation](../operations-api/clustering). For example: + +```json +{ + "operation": "cluster_status" +} +``` + +#### Database Initial Synchronization and Resynchronization + +When a new node is added to the cluster, if its database has not previously been synced, it will initially download the database from the first node it connects to. This will copy every record from the source database to the new node. Once the initial synchronization is complete, the new node will enter replication mode and receive records from each node as they are created, updated, or deleted. If a node goes down and comes back up, it will also resynchronize with the other nodes in the cluster, to ensure that it has the most up-to-date data. + +You may also specify a `start_time` in the `add_node` operation so that when a database connects, it does not download the entire database, only data since the given starting time. + +**Advanced Configuration** + +You can also check the configuration of the replication system, including the current known nodes and certificates, by querying the hdb_nodes and hdb_certificate tables: + +```json +{ + "operation": "search_by_value", + "database": "system", + "table": "hdb_nodes", + "search_attribute": "name", + "search_value": "*" +} +``` diff --git a/docs/developers/replication/sharding.md b/docs/developers/replication/sharding.md index 7e14dee6..84197445 100644 --- a/docs/developers/replication/sharding.md +++ b/docs/developers/replication/sharding.md @@ -1,3 +1,7 @@ +--- +title: Sharding +--- + Harper's replication system supports various levels of replication or sharding. Harper can be configured to replicate different data to different subsets of nodes. This can be used to facilitate horizontal scalability of storage and write performance, while maintaining optimal strategies for data locality and data consistency. When sharding is configured, Harper will replicate data to only a subset of nodes, based on the sharding configuration, and can then retrieve data from the appropriate nodes as needed to fulfill requests for data. There are two main ways to set up sharding in Harper. The first approach is dynamic sharding, where the location or residency of records is determined dynamically based on where the record was written and the record data, and records can be dynamically relocated based on where they are accessed. This residency information can be specific to each record, and can vary based on the computed residency and where the data is written and accessed.
@@ -71,7 +75,7 @@ Additionally, you can specify `replicateTo` and `replicatedConfirmation` paramet class MyTable extends tables.MyTable { put(record) { const context = this.getContext(); context.replicateTo = 2; // or an array of node names context.replicatedConfirmation = 1; return super.put(record); } @@ -128,7 +132,7 @@ Alternately you can define a custom sharding strategy based on the primary key a ```javascript MyTable.setResidencyById((id) => { return id % 2 === 0 ? 1 : 2; // return shard number }); ``` @@ -136,7 +140,7 @@ or ```javascript MyTable.setResidencyById((id) => { return id % 2 === 0 ? ['node1'] : ['node2']; // return array of node hostnames }); ``` diff --git a/docs/developers/rest.md b/docs/developers/rest.md index c40a8c9a..a88febcc 100644 --- a/docs/developers/rest.md +++ b/docs/developers/rest.md @@ -1,10 +1,14 @@ +--- +title: REST +--- + # REST ## REST Harper provides a powerful, efficient, and standard-compliant HTTP REST interface for interacting with tables and other resources. The REST interface is the recommended interface for data access, querying, and manipulation (for HTTP interactions), providing the best performance and HTTP interoperability with different clients. -Resources, including tables, can be configured as RESTful endpoints. Make sure you review the [application introduction](applications/) and [defining schemas](applications/defining-schemas.md) to properly define your schemas and select which tables are exported and available through REST interface, as tables are not exported by default. The name of the [exported](applications/defining-schemas.md#export) resource defines the basis of the endpoint path available at the application HTTP server port [configured here](../deployments/configuration.md#http) (the default being `9926`). From there, a record id or query can be appended. Following uniform interface principles, HTTP methods define different actions with resources. For each method, this describes the default action. +Resources, including tables, can be configured as RESTful endpoints. Make sure you review the [application introduction](applications/) and [defining schemas](applications/defining-schemas) to properly define your schemas and select which tables are exported and available through the REST interface, as tables are not exported by default. The name of the [exported](applications/defining-schemas#export) resource defines the basis of the endpoint path available at the application HTTP server port [configured here](../deployments/configuration#http) (the default being `9926`). From there, a record id or query can be appended. Following uniform interface principles, HTTP methods define different actions with resources. For each method, this describes the default action. The default path structure provides access to resources at several levels: @@ -78,11 +82,11 @@ Generally the POST method can be used for custom actions since POST has the broadest semantics This is handled by the Resource method `post(data)`, which is a good method to extend to make various other types of modifications.
Also, with a table you can create a new record without specifying a primary key, for example: ```http POST /MyTable/ Content-Type: application/json { "name": "some data" } ``` This will create a new record, auto-assigning a primary key, which will be returned in the `Location` header. @@ -91,7 +95,7 @@ URL query parameters provide a powerful language for specifying database queries in Harper. This can be used to search by a single attribute name and value, to find all records which provide value for the given property/attribute. It is important to note that this attribute must be configured to be indexed to search on it. For example: ```http GET /my-resource/?property=value ``` GET /my-resource/?property=value&property2=another-value Note that only one of the attributes needs to be indexed for this query to execute. We can also specify different comparators such as less than and greater than queries using [FIQL](https://datatracker.ietf.org/doc/html/draft-nottingham-atompub-fiql-00) syntax. If we want to specify records with an `age` value greater than 20: ```http GET /my-resource/?age=gt=20 diff --git a/docs/developers/security/README.md b/docs/developers/security/README.md deleted file mode 100644 index 576f2484..00000000 --- a/docs/developers/security/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Security - -Harper uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases. - -## Authentication Methods - -- [JWT Authentication](jwt-auth.md) - JSON Web Token based authentication -- [Basic Authentication](basic-auth.md) - Username/password authentication -- [mTLS Authentication](mtls-auth.md) - Certificate-based mutual TLS authentication with automatic certificate revocation checking - -## Security Configuration - -- [Configuration](configuration.md) - Security-related configuration options -- [Users and Roles](users-and-roles.md) - Managing users and role-based permissions -- [Certificate Management](certificate-management.md) - Managing SSL/TLS certificates diff --git a/docs/developers/security/basic-auth.md b/docs/developers/security/basic-auth.md index 83b1746e..6e3dac3a 100644 --- a/docs/developers/security/basic-auth.md +++ b/docs/developers/security/basic-auth.md @@ -1,16 +1,20 @@ +--- +title: Basic Authentication +--- + # Basic Authentication Harper uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request. -\*\* _**You do not need to log in separately. Basic Auth is added to each HTTP request like create_database, create_table, insert etc… via headers.**_ \*\* +_**You do not need to log in separately.
Basic Auth is added to each HTTP request like create_database, create_table, insert, etc. via headers.**_ -A header is added to each HTTP request. The header key is **“Authorization”** the header value is **“Basic <\>”** +A header is added to each HTTP request. The header key is **“Authorization”** and the header value is **“Basic <<your username and password buffer token>>”** ## Authentication in Harper Studio In the below code sample, you can see where we add the authorization header to the request. This needs to be added for each and every HTTP request for Harper. _Note: This function uses btoa. Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._ ```javascript function callHarperDB(call_object, operation, callback) { diff --git a/docs/developers/security/certificate-management.md b/docs/developers/security/certificate-management.md index 81d41943..fdc8cc22 100644 --- a/docs/developers/security/certificate-management.md +++ b/docs/developers/security/certificate-management.md @@ -1,10 +1,14 @@ +--- +title: Certificate Management +--- + # Certificate Management -This document is information on managing certificates for Harper external facing APIs. For information on certificate management for clustering see [clustering certificate management](../clustering/certificate-management.md). +This document covers managing certificates for Harper's external-facing APIs. For information on certificate management for clustering see [clustering certificate management](../clustering/certificate-management). ## Development -An out of the box install of Harper does not have HTTPS enabled (see [configuration](../../deployments/configuration.md#http) for relevant configuration file settings.) This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS. +An out-of-the-box install of Harper does not have HTTPS enabled (see [configuration](../../deployments/configuration#http) for relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS. To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper. @@ -57,7 +61,7 @@ Instead of enabling HTTPS for Harper, Nginx can be used as a reverse proxy for H Install Nginx, configure Nginx to use certificates issued from your own CA or a public CA, then configure Nginx to listen for HTTPS requests and forward to Harper as HTTP requests. [Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let’s Encrypt certificates used by Nginx. ### Option: External Reverse Proxy diff --git a/docs/developers/security/configuration.md b/docs/developers/security/configuration.md index 3debfeb3..de30868c 100644 --- a/docs/developers/security/configuration.md +++ b/docs/developers/security/configuration.md @@ -1,23 +1,27 @@ +--- +title: Configuration +--- + # Configuration Harper was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with Harper.
## CORS

-Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, Harper enables CORS for all domains if you need to disable CORS completely or set up an access list of domains you can do the following:
+Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, Harper enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following:

1. Open the harperdb-config.yaml file, which can be found in `<ROOTPATH>`, the location you specified during install.
-2. In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: cors and corsAccessList.
+1. In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: cors and corsAccessList.
   - `cors`
     1. To turn off, change to: `cors: false`
-    2. To turn on, change to: `cors: true`
+    1. To turn on, change to: `cors: true`
   - `corsAccessList`
     1. The `corsAccessList` will only be recognized by the system when `cors` is `true`
-    2. To create an access list you set `corsAccessList` to a comma-separated list of domains.
+    1. To create an access list, you set `corsAccessList` to a comma-separated list of domains.
        i.e. `corsAccessList` is `http://harpersystems.dev,http://products.harpersystems.dev`
-    3. To clear out the access list and allow all domains: `corsAccessList` is `[null]`
+    1. To clear out the access list and allow all domains: `corsAccessList` is `[null]`
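For example, a minimal sketch of the relevant section of `harperdb-config.yaml` (the domain values are the illustrative ones above; confirm the exact YAML shape against the configuration reference):

```yaml
operationsApi:
  network:
    cors: true
    corsAccessList:
      - http://harpersystems.dev
      - http://products.harpersystems.dev
```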
## SSL

diff --git a/docs/developers/security/index.md b/docs/developers/security/index.md
new file mode 100644
index 00000000..51e4b891
--- /dev/null
+++ b/docs/developers/security/index.md
@@ -0,0 +1,13 @@
+---
+title: Security
+---
+
+# Security
+
+Harper uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases.
+
+- [JWT Authentication](jwt-auth)
+- [Basic Authentication](basic-auth)
+- [mTLS Authentication](mtls-auth)
+- [Configuration](configuration)
+- [Users and Roles](users-and-roles)

diff --git a/docs/developers/security/jwt-auth.md b/docs/developers/security/jwt-auth.md
index 0b5c4de9..570de46d 100644
--- a/docs/developers/security/jwt-auth.md
+++ b/docs/developers/security/jwt-auth.md
@@ -1,3 +1,7 @@
+---
+title: JWT Authentication
+---
+
# JWT Authentication

Harper uses token-based authentication with JSON Web Tokens (JWTs).

@@ -24,7 +28,7 @@ Users must initially create tokens using their Harper credentials. The following

A full cURL example can be seen here:

```bash
curl --location --request POST 'http://localhost:9925' \
--header 'Content-Type: application/json' \
--data-raw '{
    "operation": "create_authentication_tokens",

@@ -47,7 +51,7 @@ An example expected return object is:

The `operation_token` value is used to authenticate all operations in place of our standard Basic auth. In order to pass the token, you will need to create a Bearer Token Authorization header like the following request:

```bash
curl --location --request POST 'http://localhost:9925' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \
--data-raw '{

@@ -61,10 +65,10 @@ curl --location --request POST 'http://localhost:9925' \

## Token Expiration

-`operation_token` expires at a set interval. Once it expires it will no longer be accepted by Harper. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../../deployments/configuration.md). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization Header. A full cURL example can be seen here:
+`operation_token` expires at a set interval. Once it expires, it will no longer be accepted by Harper. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization header. A full cURL example can be seen here:

```bash
curl --location --request POST 'http://localhost:9925' \
--header 'Content-Type: application/json' \
--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \
--data-raw '{

@@ -80,13 +84,13 @@ This will return a new `operation_token`. An example expected return object is:

}
```

-The `refresh_token` also expires at a set interval, but a longer interval. Once it expires it will no longer be accepted by Harper. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../../deployments/configuration.md). To generate a new `operation_token` and a new `refresh_token` the `create_authentication_tokensoperation` is called.
+The `refresh_token` also expires at a set interval, but a longer one. Once it expires, it will no longer be accepted by Harper. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called.

## Configuration

-Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration.md) with the following parameters:
+Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration) with the following parameters:

- `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation_token expires (default 1d).
- `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh_token expires (default 30d).

A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
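For example, a minimal sketch of these settings in `harperdb-config.yaml`, using the default values named above:

```yaml
operationsApi:
  authentication:
    operationTokenTimeout: 1d # operation_token lifetime
    refreshTokenTimeout: 30d # refresh_token lifetime
```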
diff --git a/docs/developers/security/mtls-auth.md b/docs/developers/security/mtls-auth.md
index 428bb2a9..375ec927 100644
--- a/docs/developers/security/mtls-auth.md
+++ b/docs/developers/security/mtls-auth.md
@@ -1,53 +1,7 @@
-# mTLS Authentication
-
-Harper supports mTLS (mutual TLS) authentication for incoming connections, providing certificate-based authentication for enhanced security. When enabled in the [HTTP config settings](../../deployments/configuration.md#http), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. The [HTTP config settings](../../deployments/configuration.md#http) allow you to determine if mTLS is required for all connections or optional.
-
-## Certificate Verification
-
-When mTLS is enabled, Harper automatically performs certificate revocation checking using OCSP (Online Certificate Status Protocol). OCSP provides real-time verification that certificates have not been revoked by the Certificate Authority, ensuring that compromised or invalidated certificates cannot be used for authentication.
-
-### How It Works
-
-When a client presents a certificate:
-
-1. Harper validates the certificate against the configured Certificate Authority
-2. If validation succeeds, Harper queries the CA's OCSP responder to check revocation status
-3. The verification result is cached to minimize performance impact on subsequent connections
-4. Based on the configuration, the connection is either allowed or rejected
+---
+title: mTLS Authentication
+---

-### Key Features
-
-- **Enabled by default** when mTLS is configured
-- **Real-time verification** with the Certificate Authority's OCSP responder
-- **Intelligent caching** of verification results (default: 1 hour)
-- **Configurable timeout** for OCSP responses (default: 5 seconds)
-- **Flexible failure handling** with fail-open or fail-closed modes
-- **Unified across all protocols** - HTTP, MQTT, and replication connections
-
-### Configuration
-
-Certificate verification can be disabled or customized through the `certificateVerification` setting.
-See the [HTTP configuration](../../deployments/configuration.md#http) and [MQTT configuration](../../deployments/configuration.md#mqtt) sections for detailed options.
-
-#### Configuration Examples
-
-```yaml
-# Simple configuration with defaults
-http:
-  securePort: 9926
-  mtls: true # Uses default certificate verification settings
-
-# Or with custom certificate verification settings
-http:
-  securePort: 9926
-  mtls:
-    certificateVerification:
-      timeout: 5000 # OCSP timeout in milliseconds (default: 5000)
-      cacheTtl: 3600000 # Cache TTL in milliseconds (default: 1 hour)
-      failureMode: fail-open # or fail-closed (default: fail-open)
+# mTLS Authentication

-# To disable certificate verification (not recommended for production)
-http:
-  securePort: 9926
-  mtls:
-    certificateVerification: false
-```
+
+Harper supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration#http), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. The [HTTP config settings](../../deployments/configuration#http) allow you to determine if mTLS is required for all connections or optional.
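For example, a minimal `harperdb-config.yaml` sketch that enables mTLS (the CA path is a hypothetical placeholder):

```yaml
tls:
  certificateAuthority: /path/to/ca.pem # CA used to verify client certificates
http:
  securePort: 9926
  mtls: true # can also be an object for finer-grained verification options
```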
diff --git a/docs/developers/security/users-and-roles.md b/docs/developers/security/users-and-roles.md
index 4d045f1e..9fbe6b75 100644
--- a/docs/developers/security/users-and-roles.md
+++ b/docs/developers/security/users-and-roles.md
@@ -1,3 +1,7 @@
+---
+title: Users & Roles
+---
+
# Users & Roles

Harper utilizes a Role-Based Access Control (RBAC) framework to manage access to Harper instances. A user is assigned a role that determines the user’s permissions to access database resources and run core operations.

@@ -9,13 +13,13 @@ Role permissions in Harper are broken into two categories – permissions around

**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a Harper instance.

1. At the table level, permissions must be explicitly defined when adding or altering a role – _i.e. Harper will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations._
-2. At the attribute-level, permissions for attributes in all tables included in the permissions set will be assigned based on either the specific attribute-level permissions defined in the table’s permission set or, if there are no attribute-level permissions defined, permissions will be based on the table’s CRUD set.
+1. At the attribute level, permissions for attributes in all tables included in the permissions set will be assigned based on either the specific attribute-level permissions defined in the table’s permission set or, if there are no attribute-level permissions defined, permissions will be based on the table’s CRUD set.

**Database Definition**: Permissions related to managing databases, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role.

**Built-In Roles**

-There are three built-in roles within Harper. See full breakdown of operations restricted to only super_user roles [here](users-and-roles.md#Role-Based-Operation-Restrictions).
+There are three built-in roles within Harper. See the full breakdown of operations restricted to super_user roles [here](users-and-roles#role-based-operation-restrictions).

- `super_user` - This role provides full access to all operations and methods within a Harper instance; this can be considered the admin role.
  - This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.

@@ -96,17 +100,17 @@ Each table that a role should be given some level of CRUD permissions to must be

```json
{
	"table_name": { // the name of the table to define CRUD perms for
		"read": boolean, // access to read from this table
		"insert": boolean, // access to insert data to table
		"update": boolean, // access to update data in table
		"delete": boolean, // access to delete row data in table
		"attribute_permissions": [ // permissions for specific table attributes
			{
				"attribute_name": "attribute_name", // attribute to assign permissions to
				"read": boolean, // access to read this attribute from table
				"insert": boolean, // access to insert this attribute into the table
				"update": boolean // access to update this attribute in the table
			}
		]
	}
}
```

(A concrete permission set illustrating these fields appears after the notes below.)

@@ -115,29 +119,29 @@ Each table that a role should be given some level of CRUD permissions to must be

**Important Notes About Table Permissions**

1. If a database and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to the database and/or tables.
-2. If a table-level CRUD permission is set to false, any attribute-level with that same CRUD permission set to true will return an error.
+1. If a table-level CRUD permission is set to false, any attribute-level permission with that same CRUD permission set to true will return an error.

**Important Notes About Attribute Permissions**

1. If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array.
-2. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.
+1. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.

   _See table_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._

-3. If attribute-level permissions are set – _i.e. attribute_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have not CRUD access (with the exception of the `hash_attribute` described in #2).
+1. If attribute-level permissions are set – _i.e. attribute_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).

   _See table_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert and update ‘attribute1’ and the table’s hash attribute but no other attributes on that table._

-4. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.
+1. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.

   _See table_name2’s permission set for an example of this._

-5. The `__createdtime__` and `__updatedtime__` attributes that Harper manages internally can have read perms set but, if set, all other attribute-level permissions will be ignored.
+1. The `__createdtime__` and `__updatedtime__` attributes that Harper manages internally can have read perms set but, if set, all other attribute-level permissions will be ignored.
-6. Please note that DELETE permissions are not included as a part of an individual attribute-level permission set. That is because it is not possible to delete individual attributes from a row, rows must be deleted in full.
+1. Please note that DELETE permissions are not included as part of an individual attribute-level permission set, because it is not possible to delete individual attributes from a row; rows must be deleted in full.
   - If a role needs the ability to delete rows from a table, that permission should be set at the table level.
   - The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement.
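Putting the schema and notes above together, here is a hypothetical permission set for a single table (the table and attribute names are illustrative, not taken from the original examples):

```json
{
	"dog": {
		"read": true,
		"insert": true,
		"update": false,
		"delete": false,
		"attribute_permissions": [
			{
				"attribute_name": "owner_name",
				"read": true,
				"insert": true,
				"update": false
			}
		]
	}
}
```

Because attribute-level permissions are defined, the role can read and insert `owner_name` (and, per note 2, the table's hash attribute), but has no access to any other attribute on `dog`.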
## Role-Based Operation Restrictions

The table below includes all API operations available in Harper and indicates whether or not the operation is restricted to super_user roles.

diff --git a/docs/developers/sql-guide/README.md b/docs/developers/sql-guide/README.md
deleted file mode 100644
index 15fa7b22..00000000
--- a/docs/developers/sql-guide/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# SQL Guide
-
-{% hint style="warning" %}
-Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
-
-## Harper SQL Guide
-
-The purpose of this guide is to describe the available functionality of Harper as it relates to supported SQL functionality. The SQL parser is still actively being developed, many SQL features may not be optimized or utilize indexes. This document will be updated as more features and functionality becomes available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying, and utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](features-matrix.md)**.**
-
-Harper adheres to the concept of database & tables.
This allows developers to isolate table structures from each other all within one database.
-
-## Select
-
-Harper has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering.
-
-All results are returned as JSON object arrays.
-
-Query for all records and attributes in the dev.dog table:
-
-```
-SELECT * FROM dev.dog
-```
-
-Query specific columns from all rows in the dev.dog table:
-
-```
-SELECT id, dog_name, age FROM dev.dog
-```
-
-Query for all records and attributes in the dev.dog table ORDERED BY age in ASC order:
-
-```
-SELECT * FROM dev.dog ORDER BY age
-```
-
-_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._
-
-## Insert
-
-Harper supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. Harper does not support selecting from one table to insert into another at this time.
-
-```
-INSERT INTO dev.dog (id, dog_name, age, breed_id)
-    VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
-```
-
-## Update
-
-Harper supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time selecting from one table to update another is not supported.
-
-```
-UPDATE dev.dog
-    SET owner_name = 'Kyle'
-    WHERE id IN (1, 2)
-```
-
-## Delete
-
-Harper supports deleting records from a table with condition support.
-
-```
-DELETE FROM dev.dog
-    WHERE age < 4
-```
-
-## Joins
-
-Harper allows developers to join any number of tables and currently supports the following join types:
-
-- INNER JOIN
-- LEFT INNER JOIN
-- LEFT OUTER JOIN
-
-Here’s a basic example joining two tables from our Get Started example – joining a dogs table with a breeds table:
-
-```
-SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
-    FROM dev.dog AS d
-    INNER JOIN dev.breed AS b ON d.breed_id = b.id
-    WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
-    AND b.section = 'Mutt'
-    ORDER BY d.dog_name
-```

diff --git a/docs/developers/sql-guide/date-functions.md b/docs/developers/sql-guide/date-functions.md
index 3c3dce7f..0133e089 100644
--- a/docs/developers/sql-guide/date-functions.md
+++ b/docs/developers/sql-guide/date-functions.md
@@ -1,12 +1,16 @@
-{% hint style="warning" %}
+---
+title: SQL Date Functions
+---
+
+:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
+:::

# SQL Date Functions

Harper utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC.
When parsing date values passed to SQL date functions in HDB, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found.

### CURRENT_DATE()

@@ -117,7 +121,7 @@ AS date_diff_result" returns

### DATE_FORMAT(date, format)

Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/).

```
"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss')

diff --git a/docs/developers/sql-guide/features-matrix.md b/docs/developers/sql-guide/features-matrix.md
index 853a18cd..f436ad62 100644
--- a/docs/developers/sql-guide/features-matrix.md
+++ b/docs/developers/sql-guide/features-matrix.md
@@ -1,8 +1,12 @@
+---
+title: SQL Features Matrix
+---
+
# SQL Features Matrix

-{% hint style="warning" %}
+:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
+:::

## SQL Features Matrix

diff --git a/docs/developers/sql-guide/functions.md b/docs/developers/sql-guide/functions.md
index e0a3ee2e..bf5fd219 100644
--- a/docs/developers/sql-guide/functions.md
+++ b/docs/developers/sql-guide/functions.md
@@ -1,6 +1,10 @@
-{% hint style="warning" %}
+---
+title: Harper SQL Functions
+---
+
+:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
+:::

# Harper SQL Functions

@@ -21,7 +25,7 @@ This SQL keywords reference contains the SQL functions available in Harper.

| ARRAY\* | ARRAY(_expression_) | Returns a list of data as a field. |
| DISTINCT_ARRAY\* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |

\*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).

### Conversion

diff --git a/docs/developers/sql-guide/index.md b/docs/developers/sql-guide/index.md
new file mode 100644
index 00000000..f7f66fc5
--- /dev/null
+++ b/docs/developers/sql-guide/index.md
@@ -0,0 +1,88 @@
+---
+title: SQL Guide
+---
+
+# SQL Guide
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+## Harper SQL Guide
+
+The purpose of this guide is to describe the available functionality of Harper as it relates to supported SQL functionality. The SQL parser is still actively being developed; many SQL features may not be optimized or utilize indexes. This document will be updated as more features and functionality become available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying and for utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](features-matrix)**.**
+
+Harper adheres to the concept of database & tables. This allows developers to isolate table structures from each other all within one database.
+
+## Select
+
+Harper has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering.
+
+All results are returned as JSON object arrays.
+
+Query for all records and attributes in the dev.dog table:
+
+```
+SELECT * FROM dev.dog
+```
+
+Query specific columns from all rows in the dev.dog table:
+
+```
+SELECT id, dog_name, age FROM dev.dog
+```
+
+Query for all records and attributes in the dev.dog table ORDERED BY age in ASC order:
+
+```
+SELECT * FROM dev.dog ORDER BY age
+```
+
+_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._
+
+## Insert
+
+Harper supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. Harper does not support selecting from one table to insert into another at this time.
+
+```
+INSERT INTO dev.dog (id, dog_name, age, breed_id)
+    VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
+```
+
+## Update
+
+Harper supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time selecting from one table to update another is not supported.
+
+```
+UPDATE dev.dog
+    SET owner_name = 'Kyle'
+    WHERE id IN (1, 2)
+```
+
+## Delete
+
+Harper supports deleting records from a table with condition support.
+
+```
+DELETE FROM dev.dog
+    WHERE age < 4
+```
+
+## Joins
+
+Harper allows developers to join any number of tables and currently supports the following join types:
+
+- INNER JOIN
+- LEFT INNER JOIN
+- LEFT OUTER JOIN
+
+Here’s a basic example joining two tables from our Get Started example – joining a dogs table with a breeds table:
+
+```
+SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
+    FROM dev.dog AS d
+    INNER JOIN dev.breed AS b ON d.breed_id = b.id
+    WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
+    AND b.section = 'Mutt'
+    ORDER BY d.dog_name
+```

diff --git a/docs/developers/sql-guide/json-search.md b/docs/developers/sql-guide/json-search.md
index f6cb15a3..b078baa7 100644
--- a/docs/developers/sql-guide/json-search.md
+++ b/docs/developers/sql-guide/json-search.md
@@ -1,10 +1,14 @@
-{% hint style="warning" %}
+---
+title: SQL JSON Search
+---
+
+:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
+:::

# SQL JSON Search
Harper automatically indexes all top level attributes in a row / object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, Harper offers a special SQL function called SEARCH_JSON. The SEARCH_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) into our SQL engine.

## Syntax

@@ -121,7 +125,7 @@ Then the expression tells the function to only return entries where the name att

```
name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]
```

-So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: {“actor”: name, “character”: character}. This tells the function to create a specific object for each matching entry.
+So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{"actor": name, "character": character}`. This tells the function to create a specific object for each matching entry.

**Sample Result**

@@ -168,6 +172,6 @@ SEARCH_JSON(

As seen above, we execute the same name filter against the cast array; the primary difference is that we wrap the filtered results in $count(…). This returns a count of the results, which we then use against our SQL comparator of >= 2.

-To see further SEARCH_JSON examples in action view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples.md).
+To see further SEARCH_JSON examples in action, view our Postman Collection, which provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples).

To learn more about how to build expressions, check out the JSONata documentation: [http://docs.jsonata.org/overview](http://docs.jsonata.org/overview)

diff --git a/docs/developers/sql-guide/reserved-word.md b/docs/developers/sql-guide/reserved-word.md
index d285dc3e..2cd812ba 100644
--- a/docs/developers/sql-guide/reserved-word.md
+++ b/docs/developers/sql-guide/reserved-word.md
@@ -1,6 +1,10 @@
-{% hint style="warning" %}
+---
+title: Harper SQL Reserved Words
+---
+
+:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
+:::

# Harper SQL Reserved Words

diff --git a/docs/developers/sql-guide/sql-geospatial-functions.md b/docs/developers/sql-guide/sql-geospatial-functions.md
index d45ef30a..e00986f3 100644
--- a/docs/developers/sql-guide/sql-geospatial-functions.md
+++ b/docs/developers/sql-guide/sql-geospatial-functions.md
@@ -1,16 +1,20 @@
-{% hint style="warning" %}
+---
+title: SQL Geospatial Functions
+---
+
+:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
-{% endhint %}
+:::

# SQL Geospatial Functions

Harper geospatial features require data to be stored in a single column using the [GeoJSON standard](http://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements.

If you are new to GeoJSON, you should check out the full specification here: http://geojson.org/. There are a few important things to point out before getting started.

1. All GeoJSON coordinates are stored in `[longitude, latitude]` format.
-2. Coordinates or GeoJSON geometries must be passed as string when written directly in a SQL statement.
+1. Coordinates or GeoJSON geometries must be passed as strings when written directly in a SQL statement.
-3. Note if you are using Postman for you testing. Due to limitations in the Postman client, you will need to escape quotes in your strings and your SQL will need to be passed on a single line.
+1. Note if you are using Postman for your testing: due to limitations in the Postman client, you will need to escape quotes in your strings, and your SQL will need to be passed on a single line.

In the examples contained in the left-hand navigation, database and table names may change, but all GeoJSON data will be stored in a column named geo_data.

@@ -299,7 +303,7 @@ WHERE geoContains(geo_data, '{

# geoEqual

Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean.

## Syntax

diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md
deleted file mode 100644
index 4273fdec..00000000
--- a/docs/getting-started/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Getting Started
-
-If you're new to Harper, this section will guide you through the essential resources you need to get started.
-
-Follow the steps in this documentation to discover how Harper can simplify your backend stack, eliminate many inter-process communication delays, and achieve a more predictable and performant application experience.
-
-For more advanced concepts in Harper, see our [blog](https://www.harpersystems.dev/blog).
-
-## Harper Basics
-
-| | | |
-| --- | --- | --- |
-| Install Harper | Pick the installation method that best suits your environment | install-harper |
-| What is Harper | Learn about Harper, how it works, and some of its use cases | what-is-harper |
-| Harper Concepts | Learn about Harper's fundamental concepts and how they interact | harper-concepts |
diff --git a/docs/getting-started/first-harper-app.md b/docs/getting-started/first-harper-app.md
index c7859eef..d1f52b47 100644
--- a/docs/getting-started/first-harper-app.md
+++ b/docs/getting-started/first-harper-app.md
@@ -1,3 +1,7 @@
+---
+title: Create Your First Application
+---
+
# Create Your First Application

Now that you've set up Harper, let's build a simple API. Harper lets you build powerful APIs with minimal effort. In just a few minutes, you'll have a functional REST API with automatic validation, indexing, and querying—all without writing a single line of code.

@@ -7,7 +11,7 @@ Now that you've set up Harper, let's build a simple API. Harper lets you build p

Start by cloning the Harper application template:

```bash
git clone https://github.com/HarperDB/application-template my-app
cd my-app
```

The core of a Harper application is the database, so let's create a database table.

A quick and expressive way to define a table is through a [GraphQL Schema](https://graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we will need to add a `type` of `@table` named `Dog` (and you can remove the example table in the template):

```graphql
type Dog @table {

@@ -84,7 +88,7 @@ type Dog @table @export {

}
```

-By default the application HTTP server port is `9926` (this can be [configured here](../deployments/configuration.md#http)), so the local URL would be `http://localhost:9926/Dog/` with a full REST API. We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, we could use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id:
+By default, the application HTTP server port is `9926` (this can be [configured here](../deployments/configuration#http)), so the local URL would be `http://localhost:9926/Dog/` with a full REST API. We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, we could use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id:

```json
POST /Dog/
Content-Type: application/json

{
	"name": "Harper",
	"breed": "Labrador"
}
```

With this, a record will be created and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/` to view that record. Alternatively, the command `curl http://localhost:9926/Dog/` will achieve the same thing.
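For illustration, a hedged sketch of the full round trip with curl (assuming a local instance on the default port; the field values mirror the queries later in this guide):

```bash
# Create a record; Harper assigns the primary key and returns it in the Location header
curl -i -X POST http://localhost:9926/Dog/ \
  -H 'Content-Type: application/json' \
  -d '{ "name": "Harper", "breed": "Labrador" }'

# Fetch the new record back using the id echoed in the Location header
curl http://localhost:9926/Dog/<id-from-location-header>
```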
## Authenticating Endpoints

@@ -106,7 +110,7 @@ Now that you've created your first API endpoints, it's important to ensure they'

Endpoints created with Harper automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../developers/security/) for more information on different levels of access.

-By default, Harper also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make it simple to interact for local development. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../deployments/configuration.md#authentication).
+By default, Harper also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make local development simple. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../deployments/configuration#authentication).

### Content Negotiation

@@ -122,7 +126,7 @@

If-None-Match: "etag-id" # browsers can automatically provide this
```

## Querying

-Querying your application database is straightforward and easy, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../developers/rest.md). Simple queries can be crafted through [URL query parameters](https://en.wikipedia.org/wiki/Query_string).
+Querying your application database is straightforward, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../developers/rest). Simple queries can be crafted through [URL query parameters](https://en.wikipedia.org/wiki/Query_string).

In order to maintain reasonable query speed on a database as it grows in size, it is critical to select and establish the proper indexes. So, before we add the `@export` declaration to our `Dog` table and begin querying it, let's take a moment to target some table properties for indexing. We'll use `name` and `breed` as indexed table properties on our `Dog` table. All we need to do to accomplish this is tag these properties with the `@indexed` directive:

```graphql
type Dog @table @export {

@@ -153,14 +157,14 @@ type Dog @table @export {

}
```

Now we can start querying. Again, we simply access the endpoint with query parameters (basic GET requests), like:

```
http://localhost:9926/Dog/?name=Harper
http://localhost:9926/Dog/?breed=Labrador
http://localhost:9926/Dog/?breed=Husky&name=Balto&select(id,name,breed)
```

-Congratulations, you now have created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../developers/rest.md) and see the [Schema reference](../developers/applications/defining-schemas.md) for more options for defining schemas.
+Congratulations, you have now created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../developers/rest) and the [Schema reference](../developers/applications/defining-schemas) for more options for defining schemas.
-
-> Additionally, you may now use GraphQL (over HTTP) to create queries. See the documentation for that new feature [here](../technical-details/reference/graphql.md).
+> Additionally, you may now use GraphQL (over HTTP) to create queries. See the documentation for that new feature [here](../technical-details/reference/graphql).

## Key Takeaway

diff --git a/docs/getting-started/harper-concepts.md b/docs/getting-started/harper-concepts.md
index 208950c7..87734062 100644
--- a/docs/getting-started/harper-concepts.md
+++ b/docs/getting-started/harper-concepts.md
@@ -1,16 +1,20 @@
+---
+title: Harper Concepts
+---
+
# Harper Concepts

As you begin your journey with Harper, there are a few concepts and definitions that you should understand.

## Components

-Harper components are a core Harper concept defined as flexible JavaScript based extensions of the highly extensible core Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](../technical-details/reference/globals.md) (such as Resource, databases, and tables).
+Harper components are a core Harper concept: flexible, JavaScript-based extensions of the core Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](../technical-details/reference/globals) (such as Resource, databases, and tables).

-A key aspect to components are their extensibility; components can be built on other components. For example, a [Harper Application](../developers/applications/README.md) is a component that uses many other components. The [application template](https://github.com/HarperDB/application-template) demonstrates many of Harper's built-in components such as [rest](../technical-details/reference/components/built-in-extensions.md#rest) (for automatic REST endpoint generation), [graphqlSchema](../technical-details/reference/components/built-in-extensions.md#graphqlschema) (for table schema definitions), and many more.
+A key aspect of components is their extensibility; components can be built on other components. For example, a [Harper Application](../developers/applications/) is a component that uses many other components. The [application template](https://github.com/HarperDB/application-template) demonstrates many of Harper's built-in components, such as [rest](../technical-details/reference/components/built-in-extensions#rest) (for automatic REST endpoint generation), [graphqlSchema](../technical-details/reference/components/built-in-extensions#graphqlschema) (for table schema definitions), and many more.

## Applications

-Applications are a subset of components that cannot be used directly and must depend on other extensions. Examples include defining schemas (using [graphqlSchema](../technical-details/reference/components/built-in-extensions.md#graphqlschema) built-in extension), defining custom resources (using [jsResource](../technical-details/reference/components/built-in-extensions.md#jsresource) built-in extension), hosting static files (using [static](../technical-details/reference/components/built-in-extensions.md#static) built-in extension), enabling REST querying of resources (using [rest](../technical-details/reference/components/built-in-extensions.md#rest) built-in extension), and running [Next.js](https://github.com/HarperDB/nextjs), [Astro](https://github.com/HarperDB/astro), or [Apollo](https://github.com/HarperDB/apollo) applications through their respective extensions.
+Applications are a subset of components that cannot be used directly and must depend on other extensions. Examples include defining schemas (using the [graphqlSchema](../technical-details/reference/components/built-in-extensions#graphqlschema) built-in extension), defining custom resources (using the [jsResource](../technical-details/reference/components/built-in-extensions#jsresource) built-in extension), hosting static files (using the [static](../technical-details/reference/components/built-in-extensions#static) built-in extension), enabling REST querying of resources (using the [rest](../technical-details/reference/components/built-in-extensions#rest) built-in extension), and running [Next.js](https://github.com/HarperDB/nextjs), [Astro](https://github.com/HarperDB/astro), or [Apollo](https://github.com/HarperDB/apollo) applications through their respective extensions.

## Resources

diff --git a/docs/getting-started/index.md b/docs/getting-started/index.md
new file mode 100644
index 00000000..841ff062
--- /dev/null
+++ b/docs/getting-started/index.md
@@ -0,0 +1,46 @@
+---
+title: Getting Started
+---
+
+# Getting Started
+
+If you're new to Harper, this section will guide you through the essential resources you need to get started.
+
+Follow the steps in this documentation to discover how Harper can simplify your backend stack, eliminate many inter-process communication delays, and achieve a more predictable and performant application experience.
+
+For more advanced concepts in Harper, see our [blog](https://www.harpersystems.dev/blog).
+
+## Harper Basics
+
+| | |
+| --- | --- |
+| [Install Harper](install-harper) | Pick the installation method that best suits your environment |
+| [What is Harper](what-is-harper) | Learn about Harper, how it works, and some of its use cases |
+| [Harper Concepts](harper-concepts) | Learn about Harper's fundamental concepts and how they interact |
diff --git a/docs/getting-started/install-harper.md b/docs/getting-started/install-harper.md
index 43f4e6cc..be315672 100644
--- a/docs/getting-started/install-harper.md
+++ b/docs/getting-started/install-harper.md
@@ -1,10 +1,14 @@
+---
+title: Install Harper
+---
+
# Install Harper

There are three ways to install a Harper instance: using a package manager like npm, deploying it as a Docker container, and offline installation. Below is a step-by-step tutorial for each method.

## Installing via NPM

Before you begin, ensure you have [Node.js](https://nodejs.org/) LTS or newer. Node.js comes with npm, which will be used to install Harper.

Open your terminal or command prompt and install Harper globally by executing the command below. Installing globally allows the `harperdb` command to be accessible from anywhere on your machine, making it easier to manage multiple projects.

@@ -24,7 +28,7 @@ At this point, your local Harper instance is up and running, giving you the abil

## Installing via Docker

Using Docker to run Harper is an efficient way to manage a containerized instance that encapsulates all of Harper’s functionality. First, ensure that Docker is installed and running on your system. If it isn’t, download it from the [official Docker website](https://docs.docker.com/engine/install/) and complete the installation process.

Next, open your terminal and pull the latest Harper image by running the following command:

@@ -42,7 +46,7 @@ In this command, the `-d` flag runs the container in detached mode, allowing it

### How to Use this Image

[Harper configuration settings](https://harperdb.io/docs/reference/configuration-file/) can be passed as Docker run environment variables. If no environment variables are provided, Harper will operate with default configuration settings, such as:

- ROOTPATH=/home/harperdb/hdb
- OPERATIONSAPI_NETWORK_PORT=9925

@@ -54,9 +58,9 @@ These defaults allow you to quickly start an instance, though you can customize

Containers created from this image store all data and Harper configuration at `/home/harperdb/hdb`. To ensure that your data persists beyond the lifecycle of a container, you should mount this directory to a directory on the container host using a Docker volume. This ensures that your database remains available and your settings are not lost when the container is stopped or removed.
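For example, a hedged sketch of a persistent deployment (the admin environment variables are those commonly used with this image; adjust paths, ports, and credentials for your environment):

```bash
# Pull the image
docker pull harperdb/harperdb

# Run detached, persisting data to a host directory and exposing the default ports
docker run -d \
  -v "$(pwd)/hdb:/home/harperdb/hdb" \
  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
  -e HDB_ADMIN_PASSWORD=password \
  -e OPERATIONSAPI_NETWORK_PORT=9925 \
  -p 9925:9925 -p 9926:9926 \
  harperdb/harperdb
```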
-{% hint style="info" %}
-Test your Harper instance is up and running by querying `curl http://localhost:9925/health`
-{% endhint %}
+:::info
+Test that your Harper instance is up and running by querying `curl http://localhost:9925/health`
+:::

### Example Deployments

@@ -119,11 +123,11 @@

If you want to inspect the logs to ensure that Harper has started correctly, use the command:

```bash
docker logs <container_name>
```

Once verified, you can access your Harper instance by opening your web browser and navigating to http://localhost:9925 (or the appropriate port based on your configuration).

### Raw binary installation

There's another way to install Harper: you can choose your version, download the npm package, and install it directly (you'll still need Node.js and npm). Click [this link](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) to download the package. Once you've downloaded the .tgz file, run the following commands from the directory where you've placed it:

```bash
npm install -g harperdb-X.X.X.tgz
harperdb install
```

diff --git a/docs/getting-started/what-is-harper.md b/docs/getting-started/what-is-harper.md
index 01fd4931..7557b468 100644
--- a/docs/getting-started/what-is-harper.md
+++ b/docs/getting-started/what-is-harper.md
@@ -1,8 +1,12 @@
+---
+title: What is Harper
+---
+
# What is Harper

-{% hint style="info" %}
-[Connect with our team!](https://www.harpersystems.dev/contact)
-{% endhint %}
+:::info
+[Connect with our team!](https://www.harpersystems.dev/contact)
+:::

## What is Harper? Performance, Simplicity, and Scale.

@@ -10,7 +14,7 @@ Harper is an all-in-one backend technology that fuses database technologies, cach

Harper simplifies scaling with clustering and native data replication. At scale, architectures tend to include 4 to 16 redundant, geo-distributed nodes located near every user population center. This ensures that every user experiences minimal network latency and maximum reliability in addition to the already rapid server responses.

-
Comparison of Harper's all-in-one technology (left) versus traditional multi-system approaches (right), highlighting Harper's speed, simplicity, and efficiency with no intermediary processes, against the latency and complexity of legacy strategies.
+![Comparison of Harper's all-in-one technology (left) versus traditional multi-system approaches (right)](/harperstack.jpg)

## Understanding the Paradigm Shift

@@ -20,11 +24,11 @@ What we realized is that networking systems together in this way is inefficient

## Build With Harper

Start by running Harper locally with [npm](https://www.npmjs.com/package/harperdb) or [Docker](https://hub.docker.com/r/harperdb/harperdb).

Since technology tends to be built around the storage, processing, and transfer of data, start by [defining your schema](../developers/applications/#creating-our-first-table) with the `schema.graphql` file in the root of the application directory.

-If you would like to [query](../developers/applications/#adding-an-endpoint) this data, add the `@export` directive to our data schema and test out the [REST](../developers/rest.md), [MQTT](../developers/real-time.md#mqtt), or [WebSocket](../developers/real-time.md#websockets) endpoints.
+If you would like to [query](../developers/applications/#adding-an-endpoint) this data, add the `@export` directive to your data schema and test out the [REST](../developers/rest), [MQTT](../developers/real-time#mqtt), or [WebSocket](../developers/real-time#websockets) endpoints.

When you are ready for something a little more advanced, start [customizing your application](../developers/applications/#custom-functionality-with-javascript).

@@ -32,9 +36,9 @@ Finally, when it’s time to deploy, explore [replication](../developers/replica

If you would like to jump into the most advanced capabilities, learn about [components](../technical-details/reference/components/).

-{% hint style="warning" %}
-Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
-{% endhint %}
+:::warning
+Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
+:::

## Popular Use Cases

@@ -42,14 +46,14 @@ With so much functionality built in, the use cases span nearly all application s

### Online Catalogs & Content Delivery

For use cases like e-commerce, real estate listings, and content-oriented sites, Harper’s breakthroughs in performance and distribution pay dividends in the form of better SEO and higher conversion rates. One common implementation leverages Harper’s [Next.js Component](https://github.com/HarperDB/nextjs) to host modern, performant frontend applications. Other implementations leverage the built-in caching layer and JavaScript application system to [server-side render pages](https://www.harpersystems.dev/development/tutorials/server-side-rendering-with-multi-tier-cache) that remain fully responsive because of built-in WebSocket connections.

### Data Delivery Networks

-For use cases like real-time sports updates, flight tracking, and zero-day software update distribution, Harper is rapidly gaining popularity.
When you are ready for something a little more advanced, start [customizing your application](../developers/applications/#custom-functionality-with-javascript).

@@ -32,9 +36,9 @@ Finally, when it’s time to deploy, explore [replication](../developers/replica

If you would like to jump into the most advanced capabilities, learn about [components](../technical-details/reference/components/).

-{% hint style="warning" %}
-Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
-{% endhint %}
+:::warning
+Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
+:::

## Popular Use Cases

@@ -42,14 +46,14 @@ With so much functionality built in, the use cases span nearly all application s

### Online Catalogs & Content Delivery

-For use cases like e-commerce, real estate listing, and content-oriented sites, Harper’s breakthroughs in performance and distribution pay dividends in the form of better SEO and higher conversion rates. One common implementation leverages Harper’s [Next.js Component](https://github.com/HarperDB/nextjs) to host modern, performant frontend applications. Other implementations leverage the built-in caching layer and JavaScript application system to [server-side render pages](https://www.harpersystems.dev/development/tutorials/server-side-rendering-with-multi-tier-cache) that remain fully responsive because of built-in WebSocket connections.
+For use cases like e-commerce, real estate listings, and content-oriented sites, Harper’s breakthroughs in performance and distribution pay dividends in the form of better SEO and higher conversion rates. One common implementation leverages Harper’s [Next.js Component](https://github.com/HarperDB/nextjs) to host modern, performant frontend applications. Other implementations leverage the built-in caching layer and JavaScript application system to [server-side render pages](https://www.harpersystems.dev/development/tutorials/server-side-rendering-with-multi-tier-cache) that remain fully responsive because of built-in WebSocket connections.

### Data Delivery Networks

-For use cases like real-time sports updates, flight tracking, and zero-day software update distribution, Harper is rapidly gaining popularity. Harper’s ability to receive and broadcast messages while simultaneously handling application logic and data storage streamlines operations and eliminates the need for multiple separate systems. To build an understanding of our messaging system function, refer to our [real-time documentation](../developers/real-time.md).
+For use cases like real-time sports updates, flight tracking, and zero-day software update distribution, Harper is rapidly gaining popularity. Harper’s ability to receive and broadcast messages while simultaneously handling application logic and data storage streamlines operations and eliminates the need for multiple separate systems. To build an understanding of how our messaging system functions, refer to our [real-time documentation](../developers/real-time).

### Edge Inference Systems

Capturing, storing, and processing real-time data streams from client and IoT systems typically requires a stack of technology. Harper’s selective data replication and self-healing connections make for an ideal multi-tier system where edge and cloud systems both run Harper, making everything more performant.

-[We’re happy](https://www.harpersystems.dev/contact) to walk you through how to do this.
+[We’re happy](https://www.harpersystems.dev/contact) to walk you through how to do this.

diff --git a/docs/index.md b/docs/index.md
index 17d4e100..98e5f5d0 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1 +1,104 @@
-
+---
+title: Harper Docs
+---
+
+# Harper Docs
+
+:::info
+[Connect with our team!](https://www.harpersystems.dev/contact)
+:::
+
+Welcome to the Harper Documentation! Here, you'll find all things Harper, and everything you need to get started, troubleshoot issues, and make the most of our platform.
+
+## Getting Started
+
+- **Install Harper**: Pick the installation method that best suits your environment
+- **What is Harper**: Learn about Harper, how it works, and some of its use cases
+- **Harper Concepts**: Learn about Harper's fundamental concepts and how they interact
+
+## Building with Harper
+
+- **Harper Applications**: Build a fully featured Harper Component with custom functionality
+- **REST Queries**: The recommended HTTP interface for data access, querying, and manipulation
+- **Operations API**: Configure, deploy, administer, and control your Harper instance
+- **Clustering & Replication**: The process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns
+- **Explore the Harper Studio**: The web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface
diff --git a/docs/technical-details/_category_.json b/docs/technical-details/_category_.json
new file mode 100644
index 00000000..69ce80a6
--- /dev/null
+++ b/docs/technical-details/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Technical Details",
+  "position": 4,
+  "link": {
+    "type": "generated-index",
+    "title": "Technical Details Documentation",
+    "description": "Reference documentation and technical specifications",
+    "keywords": [
+      "technical-details"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/docs/technical-details/reference/README.md b/docs/technical-details/reference/README.md
deleted file mode 100644
index 15c23c26..00000000
--- a/docs/technical-details/reference/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Reference
-
-This section contains technical details and reference materials for Harper.
-
-- [Analytics](analytics.md)
-- [Architecture](architecture.md)
-- [Blob](blob.md)
-- [Content Types](content-types.md)
-- [Components](components/README.md)
-  - [Applications](components/applications.md)
-  - [Built-In Extensions](components/built-in-extensions.md)
-  - [Configuration](components/configuration.md)
-  - [Extensions](components/extensions.md)
-  - [(Experimental) Plugins](components/plugins.md)
-- [Data Types](data-types.md)
-- [Dynamic Schema](dynamic-schema.md)
-- [Globals](globals.md)
-- [GraphQL](graphql.md)
-- [Headers](headers.md)
-- [Limits](limits.md)
-- [Resources](resources/README.md)
-  - [Migration](resources/migration.md)
-  - [Instance Binding](resources/instance-binding.md)
-- [Storage Algorithm](storage-algorithm.md)
-- [Transactions](transactions.md)
diff --git a/docs/technical-details/reference/analytics.md b/docs/technical-details/reference/analytics.md
index e63bb0fd..39c92109 100644
--- a/docs/technical-details/reference/analytics.md
+++ b/docs/technical-details/reference/analytics.md
@@ -1,3 +1,7 @@
+---
+title: Analytics
+---
+
# Analytics

Harper provides extensive telemetry and analytics data to help monitor the status of the server and work loads, and to help understand traffic and usage patterns to identify issues and scaling needs, and identify queries and actions that are consuming the most resources.

@@ -7,7 +11,7 @@ Harper collects statistics for all operations, URL endpoints, and messaging topi

There are two "levels" of analytics in the Harper analytics table: the first is the immediate level of raw direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search_by_conditions operation (this will search for 10 seconds worth of analytics) on the `hdb_raw_analytics` table:

```
-POST http://localhost:9925
+POST http://localhost:9925
Content-Type: application/json

{
@@ -65,7 +69,7 @@ And a typical response looks like:

The second level of analytics recording is aggregate data. The aggregate records are recorded once a minute, and aggregate the results from all the per-second entries from all the threads, creating a summary of statistics once a minute. The ids for these milliseconds since epoch can be queried from the `hdb_analytics` table.
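For reference, a request body for this kind of aggregate query might look like the following sketch, sent in a POST like the one shown next. This assumes the standard `search_by_conditions` fields and that the analytics tables live in the `system` database; the timestamp range is purely illustrative:

```json
{
	"operation": "search_by_conditions",
	"database": "system",
	"table": "hdb_analytics",
	"conditions": [
		{
			"search_attribute": "id",
			"search_type": "between",
			"search_value": [1717000000000, 1717000060000]
		}
	]
}
```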
You can query these with an operation like:

```
-POST http://localhost:9925
+POST http://localhost:9925
Content-Type: application/json

{
diff --git a/docs/technical-details/reference/architecture.md b/docs/technical-details/reference/architecture.md
index d82c4b50..4155d5ff 100644
--- a/docs/technical-details/reference/architecture.md
+++ b/docs/technical-details/reference/architecture.md
@@ -1,3 +1,7 @@
+---
+title: Architecture
+---
+
# Architecture

Harper's architecture consists of resources, which includes tables and user defined data sources and extensions, and server interfaces, which includes the RESTful HTTP interface, operations API, and MQTT. Servers are supported by routing and auth services.

diff --git a/docs/technical-details/reference/blob.md b/docs/technical-details/reference/blob.md
index d99b666c..c747fd28 100644
--- a/docs/technical-details/reference/blob.md
+++ b/docs/technical-details/reference/blob.md
@@ -1,3 +1,7 @@
+---
+title: Blob
+---
+
# Blob

Blobs are binary large objects that can be used to store any type of unstructured/binary data and is designed for large content. Blobs support streaming and feature better performance for content larger than about 20KB. Blobs are built off the native JavaScript `Blob` type, and HarperDB extends the native `Blob` type for integrated storage with the database. To use blobs, you would generally want to declare a field as a `Blob` type in your schema:

@@ -30,7 +34,7 @@ export class MyEndpoint extends MyTable {
		return {
			status: 200,
			headers: {},
-			body: this.data, // this.data is a blob
+			body: this.data, // this.data is a blob
		});
	}
}
@@ -40,11 +44,11 @@ One of the important characteristics of blobs is they natively support asynchron

```javascript
let blob = await createBlob(stream);
-// at this point the blob exists, but the data is still being written to storage
+// at this point the blob exists, but the data is still being written to storage
await MyTable.put({ id: 'my-record', data: blob });
-// we now have written a record that references the blob
+// we now have written a record that references the blob
let record = await MyTable.get('my-record');
-// we now have a record that gives us access to the blob. We can asynchronously access the blob's data or stream the data, and it will be available as blob the stream is written to the blob.
+// we now have a record that gives us access to the blob. We can asynchronously access the blob's data or stream the data, and it will be available as the stream is written to the blob.
let stream = record.data.stream();
```

@@ -53,13 +57,13 @@ Alternately, we can also wait for the blob to be fully written to storage before

```javascript
let blob = await createBlob(stream);
-// at this point the blob exists, but the data is was not been written to storage
+// at this point the blob exists, but the data has not yet been fully written to storage
await blob.save(MyTable);
-// we now know the blob is fully written to storage
+// we now know the blob is fully written to storage
await MyTable.put({ id: 'my-record', data: blob });
```

-Note that this means that blobs are _not_ atomic or [ACID](https://en.wikipedia.org/wiki/ACID) compliant; streaming functionality achieves the opposite behavior of ACID/atomic writes that would prevent access to data as it is being written.
+Note that this means that blobs are _not_ atomic or [ACID](https://en.wikipedia.org/wiki/ACID) compliant; streaming functionality achieves the opposite behavior of ACID/atomic writes that would prevent access to data as it is being written.
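As a usage sketch, this streaming behavior makes it practical to store a large download without buffering it all in memory. The URL and record id here are illustrative, and it assumes `createBlob` accepts the response's stream and that `MyTable` is defined as above:

```javascript
// store a large download as a blob; save() waits until the data is fully written
const response = await fetch('https://example.com/large-video.mp4');
let blob = await createBlob(response.body);
await blob.save(MyTable);
await MyTable.put({ id: 'video-1', data: blob });
```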
### Error Handling

@@ -69,7 +73,7 @@ Because blobs can be streamed and referenced prior to their completion, there is

export class MyEndpoint extends MyTable {
	let blob = this.data;
	blob.on('error', () => {
-		// if this was a caching table, we may want to invalidate or delete this record:
+		// if this was a caching table, we may want to invalidate or delete this record:
		this.invalidate();
	});
	async get() {
@@ -89,14 +93,14 @@ Blobs that are created from streams may not have the standard `size` property av

```javascript
let record = await MyTable.get('my-record');
let blob = record.data;
-blob.size // will be available if it was saved with a known size
-let stream blob.stream(); // start streaming the data
+blob.size // will be available if it was saved with a known size
+let stream = blob.stream(); // start streaming the data
if (blob.size === undefined) {
	blob.on('size', (size) => {
-		// will be called once the size is available
+		// will be called once the size is available
	})
}
```

-See the [configuration](../../deployments/configuration.md) documentation for more information on configuring where blob are stored.
+See the [configuration](../../deployments/configuration) documentation for more information on configuring where blobs are stored.

diff --git a/docs/technical-details/reference/components/README.md b/docs/technical-details/reference/components/README.md
deleted file mode 100644
index 9203b3de..00000000
--- a/docs/technical-details/reference/components/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Components
-
-**Components** are the high-level concept for modules that extend the Harper core platform adding additional functionality. Components encapsulate both applications and extensions.
-
-> We are actively working to disambiguate the terminology. When you see "component", such as in the Operations API or CLI, it generally refers to an application. We will do our best to clarify exactly which classification of a component whenever possible.
-
-**Applications** are best defined as the implementation of a specific user-facing feature or functionality. Applications are built on top of extensions and can be thought of as the end product that users interact with. For example, a Next.js application that serves a web interface or an Apollo GraphQL server that provides a GraphQL API are both applications.
-
-**Extensions** are the building blocks of the Harper component system. Applications depend on extensions to provide the functionality the application is implementing. For example, the built-in `graphqlSchema` extension enables applications to define their databases and tables using GraphQL schemas. Furthermore, the `@harperdb/nextjs` and `@harperdb/apollo` extensions are the building blocks that provide support for building Next.js and Apollo applications.
-
-> As of Harper v4.6, a new, **experimental** component system has been introduced called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade. Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleApplication` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](plugins.md) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported.
-
-All together, the support for implementing a feature is the extension, and the actual implementation of the feature is the application.
-
-For more information on the differences between applications and extensions, refer to the beginning of the [Applications](../../../developers/applications/) guide documentation section.
-
-This technical reference section has detailed information on various component systems:
-
-- [Built-In Extensions](./built-in-extensions.md)
-- [Configuration](./configuration.md)
-- [Managing Applications](./applications.md)
-- [Extensions](./extensions.md)
-- [(Experimental) Plugins](./plugins.md)
-
-## Custom Applications
-
-- [`@harperdb/status-check`](https://github.com/HarperDB/status-check)
-- [`@harperdb/prometheus-exporter`](https://github.com/HarperDB/prometheus-exporter)
-- [`@harperdb/acl-connect`](https://github.com/HarperDB/acl-connect)
-
-## Custom Extensions
-
-- [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs)
-- [`@harperdb/apollo`](https://github.com/HarperDB/apollo)
-- [`@harperdb/astro`](https://github.com/HarperDB/astro)
diff --git a/docs/technical-details/reference/components/applications.md b/docs/technical-details/reference/components/applications.md
index 289d0af6..524a7e08 100644
--- a/docs/technical-details/reference/components/applications.md
+++ b/docs/technical-details/reference/components/applications.md
@@ -1,3 +1,7 @@
+---
+title: Applications
+---
+
# Applications

> The contents of this page predominantly relate to **application** components. Extensions are not necessarily _deployable_. The ambiguity of the term "components" is being worked on and will be improved in future releases. As we work to clarify the terminology, please keep in mind that the component operations are synonymous with application management. In general, "components" is the general term for both applications and extensions, but in context of the operations API it refers to applications only.

@@ -8,7 +12,7 @@ Harper offers several approaches to managing applications that differ between lo

Harper is designed to be simple to run locally. Generally, Harper should be installed locally on a machine using a global package manager install (i.e. `npm i -g harperdb`).

-> Before continuing, ensure Harper is installed and the `harperdb` CLI is available. For more information, review the [installation guide](../../../deployments/install-harper/README.md).
+> Before continuing, ensure Harper is installed and the `harperdb` CLI is available. For more information, review the [installation guide](../../../deployments/install-harper/).

When developing an application locally there are a number of ways to run it on Harper.

@@ -27,7 +31,7 @@ Stop execution for either of these processes by sending a SIGINT (generally CTRL

Alternatively, to mimic interfacing with a hosted Harper instance, use operation commands instead.

1. Start up Harper with `harperdb`
-2. _Deploy_ the application to the local instance by executing:
+1. _Deploy_ the application to the local instance by executing:
```sh
harperdb deploy \
@@ -41,14 +45,14 @@ Alternatively, to mimic interfacing with a hosted Harper instance, use operation

- The `restart=true` option automatically restarts Harper threads after the application is deployed
  - If set to `'rolling'`, a rolling restart will be triggered after the application is deployed
-3. In another terminal, use the `harperdb restart` command to restart the instance's threads at any time
+1. In another terminal, use the `harperdb restart` command to restart the instance's threads at any time
   - With `package=`, the application source is symlinked so changes will automatically be picked up between restarts
   - If `package` was omitted, run the `deploy` command again with any new changes
-4. To remove the application use `harperdb drop_component project=`
+1. To remove the application use `harperdb drop_component project=`

Similar to the previous section, if the main thread needs to be restarted, start and stop the Harper instance manually (with the application deployed). Upon Harper startup, the application will automatically be loaded and executed across all threads.

-> Not all [component operations](../../../developers/operations-api/components.md) are available via CLI. When in doubt, switch to using the Operations API via network requests to the local Harper instance.
+> Not all [component operations](../../../developers/operations-api/components) are available via CLI. When in doubt, switch to using the Operations API via network requests to the local Harper instance.

For example, to properly _deploy_ a `test-application` locally, the command would look like:

@@ -65,7 +69,7 @@ Keep in mind that using a local file path for `package` will only work locally;

## Remote Management

-Managing applications on a remote Harper instance is best accomplished through [component operations](../../../developers/operations-api/components.md), similar to using the `deploy` command locally. Before continuing, always backup critical Harper instances. Managing, deploying, and executing applications can directly impact a live system.
+Managing applications on a remote Harper instance is best accomplished through [component operations](../../../developers/operations-api/components), similar to using the `deploy` command locally. Before continuing, always backup critical Harper instances. Managing, deploying, and executing applications can directly impact a live system.

Remote Harper instances work very similarly to local Harper instances. The primary application management operations still include `deploy_component`, `drop_component`, and `restart`.
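For instance, the equivalent `deploy_component` request body sent to the operations API might look like the following sketch; the project and package values are illustrative and mirror the `project`/`package`/`restart` options shown above:

```json
{
	"operation": "deploy_component",
	"project": "my-app",
	"package": "HarperDB/status-check",
	"restart": true
}
```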
@@ -101,13 +105,13 @@ Unlike local development where `package` should be set to a local file path for

A local application can be deployed to a remote instance by **omitting** the `package` field. Harper will automatically package the local directory and include that along with the rest of the deployment operation.

-Furthermore, the `package` field can be set to any valid [npm dependency value](https://docs.npmjs.com/cli/v11/configuring-npm/package-json#dependencies).
+Furthermore, the `package` field can be set to any valid [npm dependency value](https://docs.npmjs.com/cli/v11/configuring-npm/package-json#dependencies).

- For applications deployed to npm, specify the package name: `package="@harperdb/status-check"`
-- For applications on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check`
-- Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-applications.git"`
-  - Reference the [SSH Key](../../../developers/operations-api/components.md#add-ssh-key) operations for more information on managing SSH keys on a remote instance
-- Even tarball URLs are supported: `package="https://example.com/application.tar.gz"`
+- For applications on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check`
+- Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-applications.git"`
+  - Reference the [SSH Key](../../../developers/operations-api/components#add-ssh-key) operations for more information on managing SSH keys on a remote instance
+- Even tarball URLs are supported: `package="https://example.com/application.tar.gz"`

> When using git tags, we highly recommend that you use the semver directive to ensure consistent and reliable installation by npm. In addition to tags, you can also reference branches or commit numbers.

@@ -121,7 +125,7 @@ The following methods are advanced and should be executed with caution as they c

First, locate the Harper installation `rootPath` directory. Generally, this is `~/hdb`. It can be retrieved by running `harperdb get_configuration` and looking for the `rootPath` field.

-> For a useful shortcut on POSIX compliant machines run: `harperdb get_configuration json=true | jq ".rootPath" | sed 's/"//g'`
+> For a useful shortcut on POSIX compliant machines run: `harperdb get_configuration json=true | jq ".rootPath" | sed 's/"//g'`

This path is the Harper instance. Within this directory, locate the root config titled `harperdb-config.yaml`, and the components root path. The components root path will be `/components` by default (thus, `~/hdb/components`), but it can also be configured. If necessary, use `harperdb get_configuration` again and look for the `componentsRoot` field for the exact path.

@@ -150,7 +154,7 @@ myTarBall:
myLocal:
  package: /Users/harper/local # install from local path
myWebsite:
-  package: https://harperdb-component # install from URL
+  package: https://harperdb-component # install from URL
```

Harper will generate a `package.json` like:

@@ -162,7 +166,7 @@
  "myNPMComponent": "npm:harperdb",
  "myTarBall": "file:/Users/harper/cool-component.tar",
  "myLocal": "file:/Users/harper/local",
-  "myWebsite": "https://harperdb-component"
+  "myWebsite": "https://harperdb-component"
  }
}
```
diff --git a/docs/technical-details/reference/components/built-in-extensions.md b/docs/technical-details/reference/components/built-in-extensions.md
index be60cb1f..150145bd 100644
--- a/docs/technical-details/reference/components/built-in-extensions.md
+++ b/docs/technical-details/reference/components/built-in-extensions.md
@@ -1,3 +1,7 @@
+---
+title: Built-In Extensions
+---
+
# Built-In Extensions

Harper provides extended features using built-in extensions. They do **not** need to be installed with a package manager, and simply must be specified in a config to run. These are used throughout many Harper docs, guides, and examples.
Unlike custom extensions which have their own semantic versions, built-in extensions follow Harper's semantic version.

@@ -26,9 +30,9 @@ For more information read the [Components, Applications, and Extensions](../../.

Load data from JSON or YAML files into Harper tables as part of component deployment.

-This component is an [Extension](./reference.md#extensions) and can be configured with the `files` configuration option.
+This component is an [Extension](..#extensions) and can be configured with the `files` configuration option.

-Complete documentation for this feature is available here: [Data Loader](../../../developers/applications/data-loader.md)
+Complete documentation for this feature is available here: [Data Loader](../../../developers/applications/data-loader)

```yaml
dataLoader:
@@ -37,11 +41,11 @@

## fastifyRoutes

-Specify custom endpoints using [Fastify](https://fastify.dev/).
+Specify custom endpoints using [Fastify](https://fastify.dev/).

-This component is a [Resource Extension](./extensions.md#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions.md#resource-extension-configuration) configuration options.
+This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options.

-Complete documentation for this feature is available here: [Define Fastify Routes](../../../developers/applications/define-routes.md)
+Complete documentation for this feature is available here: [Define Fastify Routes](../../../developers/applications/define-routes)

```yaml
fastifyRoutes:
@@ -54,7 +58,7 @@

Enables GraphQL querying via a `/graphql` endpoint loosely implementing the GraphQL Over HTTP specification.

-Complete documentation for this feature is available here: [GraphQL](../graphql.md)
+Complete documentation for this feature is available here: [GraphQL](../graphql)

```yaml
graphql: true
@@ -64,9 +68,9 @@

Specify schemas for Harper tables and resources via GraphQL schema syntax.

-This component is a [Resource Extension](./extensions.md#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions.md#resource-extension-configuration) configuration options.
+This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options.

-Complete documentation for this feature is available here: [Defining Schemas](../../../developers/applications/defining-schemas.md)
+Complete documentation for this feature is available here: [Defining Schemas](../../../developers/applications/defining-schemas)

```yaml
graphqlSchema:
@@ -77,9 +81,9 @@

Specify custom, JavaScript based Harper resources.

-Refer to the Application [Custom Functionality with JavaScript](../../../developers/applications/README.md#custom-functionality-with-javascript) guide, or [Resource Class](../resources/README.md) reference documentation for more information on custom resources.
+Refer to the Application [Custom Functionality with JavaScript](../../../developers/applications/#custom-functionality-with-javascript) guide, or [Resource Class](../resources/) reference documentation for more information on custom resources.
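As a quick sketch of what a jsResource file can export, assuming the `Resource` base class from the globals reference (the class name and response are illustrative):

```javascript
// resources.js
export class Greeting extends Resource {
	// respond to GET requests for this resource
	async get() {
		return { greeting: 'Hello from a custom Harper resource' };
	}
}
```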
-This component is a [Resource Extension](./extensions.md#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions.md#resource-extension-configuration) configuration options.
+This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options.

```yaml
jsResource:
@@ -90,7 +94,7 @@

Load environment variables via files like `.env`.

-This component is a [Resource Extension](./extensions.md#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions.md#resource-extension-configuration) configuration options.
+This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options.

Ensure this component is specified first in `config.yaml` so that environment variables are loaded prior to loading any other components.

@@ -121,7 +125,7 @@ loadEnv:

Enable automatic REST endpoint generation for exported resources with this component.

-Complete documentation for this feature is available here: [REST](../../../developers/rest.md)
+Complete documentation for this feature is available here: [REST](../../../developers/rest)

```yaml
rest: true
@@ -147,9 +151,9 @@ rest:

Specify roles for Harper tables and resources.

-This component is a [Resource Extension](./extensions.md#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions.md#resource-extension-configuration) configuration options.
+This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options.

-Complete documentation for this feature is available here: [Defining Roles](../../../developers/applications/defining-roles.md)
+Complete documentation for this feature is available here: [Defining Roles](../../../developers/applications/defining-roles)

```yaml
roles:
@@ -160,7 +164,7 @@

Serve static files via HTTP.

-The `static` plugin serves static files based on the `files` and `urlPath` [plugin configuration options](./plugins.md#configuration). It supports additional features via other options (detailed below), but the core to the plugin is to statically serve the files matched by the `files` pattern. The plugin will serve files relative to the longest non-ambiguous path within the pattern. The `urlPath` can be specified to customize the URL path that the files are served from, otherwise they match the file pattern. For example, given an application directory structure:
+Use the [Resource Extension](./extensions#resource-extension) configuration options [`files` and `urlPath`](./extensions#resource-extension-configuration) to specify the files to be served.

```
my-app/
diff --git a/docs/technical-details/reference/components/configuration.md b/docs/technical-details/reference/components/configuration.md
index ac4c0f4c..08fa4cc2 100644
--- a/docs/technical-details/reference/components/configuration.md
+++ b/docs/technical-details/reference/components/configuration.md
@@ -1,3 +1,7 @@
+---
+title: Component Configuration
+---
+
# Component Configuration

> For information on the distinction between the types of components (applications and extensions), refer to beginning of the [Applications](../../../developers/applications) documentation section.
@@ -10,17 +14,17 @@ name:
  option-2: value
```

-It is the entry's `name` that is used for component resolution. It can be one of the [built-in extensions](./built-in-extensions.md), or it must match a package dependency of the component as specified by `package.json`. The [Custom Component Configuration](#custom-component-configuration) section provides more details and examples.
+It is the entry's `name` that is used for component resolution. It can be one of the [built-in extensions](./built-in-extensions), or it must match a package dependency of the component as specified by `package.json`. The [Custom Component Configuration](#custom-component-configuration) section provides more details and examples.

-For some built-in extensions they can be configured with as little as a top-level boolean; for example, the [rest](./built-in-extensions.md#rest) extension can be enabled with just:
+For some built-in extensions they can be configured with as little as a top-level boolean; for example, the [rest](./built-in-extensions#rest) extension can be enabled with just:

```yaml
rest: true
```

-Most components generally have more configuration options. Some options are ubiquitous to the Harper platform, such as the `files` and `urlPath` options for an [extension](./extensions.md) or [plugin](./plugins.md), or `package` for any [custom component](#custom-component-configuration).
+Most components generally have more configuration options. Some options are ubiquitous to the Harper platform, such as the `files` and `urlPath` options for an [extension](./extensions) or [plugin](./plugins), or `package` for any [custom component](#custom-component-configuration).

-[Extensions](./extensions.md) and [plugins](./plugins.md) require specifying the `extensionModule` or `pluginModule` option respectively. Refer to their respective API reference documentation for more information.
+[Extensions](./extensions) and [plugins](./plugins) require specifying the `extensionModule` or `pluginModule` option respectively. Refer to their respective API reference documentation for more information.

## Custom Component Configuration

@@ -42,7 +46,7 @@ Then, within `config.yaml` it can be enabled and configured using:

  # ...
```

-Since npm allows for a [variety of dependency configurations](https://docs.npmjs.com/cli/configuring-npm/package-json#dependencies), this can be used to create custom references. For example, to depend on a specific GitHub branch, first update the `package.json`:
+Since npm allows for a [variety of dependency configurations](https://docs.npmjs.com/cli/configuring-npm/package-json#dependencies), this can be used to create custom references. For example, to depend on a specific GitHub branch, first update the `package.json`:

```json
{
@@ -80,6 +84,6 @@ static:
  files: 'web/**'
```

-Refer to the [built-in components](./built-in-extensions.md) documentation for more information on these fields.
+Refer to the [built-in components](./built-in-extensions) documentation for more information on these fields.

If a `config.yaml` is defined, it will **not** be merged with the default config.
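Tying the options together, a complete `config.yaml` that combines built-in extensions with a custom component might look like this sketch (the file globs and the custom package are illustrative):

```yaml
rest: true
graphqlSchema:
  files: 'src/schema/*.graphql'
jsResource:
  files: 'src/resources.js'
'@harperdb/nextjs':
  package: '@harperdb/nextjs'
  files: './'
```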
diff --git a/docs/technical-details/reference/components/extensions.md b/docs/technical-details/reference/components/extensions.md
index 46cb7ec0..b2a613b1 100644
--- a/docs/technical-details/reference/components/extensions.md
+++ b/docs/technical-details/reference/components/extensions.md
@@ -1,6 +1,10 @@
+---
+title: Extensions API
+---
+
# Extensions API

-> As of Harper v4.6, a new iteration of the extension API was released called **Plugins**. They are simultaneously a simplification and an extensibility upgrade. Plugins are **experimental**, but we encourage developers to consider developing with the [plugin API](./plugins.md) instead of the extension API. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported.
+> As of Harper v4.6, a new iteration of the extension API was released called **Plugins**. They are simultaneously a simplification and an extensibility upgrade. Plugins are **experimental**, but we encourage developers to consider developing with the [plugin API](./plugins) instead of the extension API. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported.

There are two key types of Extensions: **Resource Extension** and **Protocol Extensions**. The key difference is a **Protocol Extensions** can return a **Resource Extension**.

@@ -8,13 +12,13 @@ Furthermore, what defines an extension separately from a component is that it le

All extensions must define a `config.yaml` file and declare an `extensionModule` option. This must be a path to the extension module source code. The path must resolve from the root of the module directory.

-For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) `config.yaml` specifies `extensionModule: ./extension.js`.
+For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) `config.yaml` specifies `extensionModule: ./extension.js`.

If the plugin is being written in something other than JavaScript (such as TypeScript), ensure that the path resolves to the built version, (i.e. `extensionModule: ./dist/index.js`)

## Resource Extension

-A Resource Extension is for processing a certain type of file or directory. For example, the built-in [jsResource](./built-in-extensions.md#jsresource) extension handles executing JavaScript files.
+A Resource Extension is for processing a certain type of file or directory. For example, the built-in [jsResource](./built-in-extensions#jsresource) extension handles executing JavaScript files.

Resource Extensions are comprised of four distinct function exports, [`handleFile()`](#handlefilecontents-urlpath-absolutepath-resources-void--promisevoid), [`handleDirectory()`](#handledirectoryurlpath-absolutepath-resources-boolean--void--promiseboolean--void), [`setupFile()`](#setupfilecontents-urlpath-absolutepath-resources-void--promisevoid), and [`setupDirectory()`](#setupdirectoryurlpath-absolutepath-resources-boolean--void--promiseboolean--void). The `handleFile()` and `handleDirectory()` methods are executed on **all worker threads**, and are _executed again during restarts_. The `setupFile()` and `setupDirectory()` methods are only executed **once** on the **main thread** during the initial system start sequence.

@@ -26,9 +30,9 @@ Other than their execution behavior, the `handleFile()` and `setupFile()` method

Any [Resource Extension](#resource-extension) can be configured with the `files` and `urlPath` options.
These options control how _files_ and _directories_ are resolved in order to be passed to the extension's `handleFile()`, `setupFile()`, `handleDirectory()`, and `setupDirectory()` methods.

-> Harper relies on the [fast-glob](https://github.com/mrmlnc/fast-glob) library for glob pattern matching.
+> Harper relies on the [fast-glob](https://github.com/mrmlnc/fast-glob) library for glob pattern matching.

-- **files** - `string | string[] | Object` - _required_ - A [glob pattern](https://github.com/mrmlnc/fast-glob?tab=readme-ov-file#pattern-syntax) string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the extension. If specified as an object, the `source` property is required. By default, Harper **matches files and directories**; this is configurable using the `only` option.
+- **files** - `string | string[] | Object` - _required_ - A [glob pattern](https://github.com/mrmlnc/fast-glob?tab=readme-ov-file#pattern-syntax) string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the extension. If specified as an object, the `source` property is required. By default, Harper **matches files and directories**; this is configurable using the `only` option.
  - **source** - `string | string[]` - _required_ - The glob pattern string or array of strings.
  - **only** - `'all' | 'files' | 'directories'` - _optional_ - The glob pattern will match only the specified entry type. Defaults to `'all'`.
  - **ignore** - `string[]` - _optional_ - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`.
@@ -38,7 +42,7 @@
  - Note: `..` is an invalid pattern and will result in an error
  - Otherwise, the value here will be base url path. Leading and trailing `/` characters will be handled automatically (`/static/`, `/static`, and `static/` are all equivalent to `static`)

-For example, to configure the [static](./built-in-extensions.md#static) component to serve all HTML files from the `web` source directory on the `static` URL endpoint:
+For example, to configure the [static](./built-in-extensions#static) component to serve all HTML files from the `web` source directory on the `static` URL endpoint:

```yaml
static:
@@ -50,7 +54,7 @@ If there are files such as `web/index.html` and `web/blog.html`, they would be a

Furthermore, if the component is located in the `test-component` directory, and the `urlPath` was set to `'./static/'` instead, then the files would be served from `localhost/test-component/static/*` instead.

-The `urlPath` is optional, for example to configure the [graphqlSchema](./built-in-extensions.md#graphqlschema) component to load all schemas within the `src/schema` directory, only specifying a `files` glob pattern is required:
+The `urlPath` is optional; for example, to configure the [graphqlSchema](./built-in-extensions#graphqlschema) component to load all schemas within the `src/schema` directory, only specifying a `files` glob pattern is required:

```yaml
graphqlSchema:
@@ -82,11 +86,11 @@ test-component:

In order for an extension to be classified as a Resource Extension it must implement at least one of the `handleFile()`, `handleDirectory()`, `setupFile()`, or `setupDirectory()` methods. As a standalone extension, these methods should be named and exported directly.
For example:

```js
-// ESM
+// ESM
export function handleFile() {}
export function setupDirectory() {}

-// or CJS
+// or CJS

function handleDirectory() {}
function setupFile() {}
@@ -150,13 +154,13 @@ Returns: `boolean | void | Promise`

## Protocol Extension

-A Protocol Extension is a more advanced form of a Resource Extension and is mainly used for implementing higher level protocols. For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) handles building and running a Next.js project. A Protocol Extension is particularly useful for adding custom networking handlers (see the [`server`](../globals.md#server) global API documentation for more information).
+A Protocol Extension is a more advanced form of a Resource Extension and is mainly used for implementing higher level protocols. For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) handles building and running a Next.js project. A Protocol Extension is particularly useful for adding custom networking handlers (see the [`server`](../globals#server) global API documentation for more information).

### Protocol Extension Configuration

In addition to the `files` and `urlPath` [Resource Extension configuration](#resource-extension-configuration) options, and the `package` [Custom Component configuration](#custom-component-configuration) option, Protocol Extensions can also specify additional configuration options. Any options added to the extension configuration (in `config.yaml`), will be passed through to the `options` object of the `start()` and `startOnMainThread()` methods.

-For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs#options) specifies multiple option that can be included in its configuration. For example, a Next.js app using `@harperdb/nextjs` may specify the following `config.yaml`:
+For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs#options) specifies multiple options that can be included in its configuration. For example, a Next.js app using `@harperdb/nextjs` may specify the following `config.yaml`:

```yaml
'@harperdb/nextjs':
@@ -166,7 +170,7 @@
  dev: false
```

-Many protocol extensions will use the `port` and `securePort` options for configuring networking handlers. Many of the [`server`](../globals.md#server) global APIs accept `port` and `securePort` options, so components replicated this for simpler pass-through.
+Many protocol extensions will use the `port` and `securePort` options for configuring networking handlers. Many of the [`server`](../globals#server) global APIs accept `port` and `securePort` options, so components replicate this for simpler pass-through.

### Protocol Extension API

diff --git a/docs/technical-details/reference/components/index.md b/docs/technical-details/reference/components/index.md
new file mode 100644
index 00000000..42a8d092
--- /dev/null
+++ b/docs/technical-details/reference/components/index.md
@@ -0,0 +1,39 @@
+---
+title: Components
+---
+
+# Components
+
+**Components** are the high-level concept for modules that extend the Harper core platform adding additional functionality. Components encapsulate both applications and extensions.
+
+> We are actively working to disambiguate the terminology. When you see "component", such as in the Operations API or CLI, it generally refers to an application. We will do our best to clarify exactly which classification of a component is meant whenever possible.
+
+**Applications** are best defined as the implementation of a specific user-facing feature or functionality. Applications are built on top of extensions and can be thought of as the end product that users interact with. For example, a Next.js application that serves a web interface or an Apollo GraphQL server that provides a GraphQL API are both applications.
+
+**Extensions** are the building blocks of the Harper component system. Applications depend on extensions to provide the functionality the application is implementing. For example, the built-in `graphqlSchema` extension enables applications to define their databases and tables using GraphQL schemas. Furthermore, the `@harperdb/nextjs` and `@harperdb/apollo` extensions are the building blocks that provide support for building Next.js and Apollo applications.
+
+> As of Harper v4.6, a new, **experimental** component system has been introduced called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade. Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleApplication` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](plugins) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported.
+
+All together, the support for implementing a feature is the extension, and the actual implementation of the feature is the application.
+
+For more information on the differences between applications and extensions, refer to the beginning of the [Applications](../../../developers/applications/) guide documentation section.
+
+This technical reference section has detailed information on various component systems:
+
+- [Built-In Extensions](./built-in-extensions)
+- [Configuration](./configuration)
+- [Managing Applications](./applications)
+- [Extensions](./extensions)
+- [(Experimental) Plugins](./plugins)
+
+## Custom Applications
+
+- [`@harperdb/status-check`](https://github.com/HarperDB/status-check)
+- [`@harperdb/prometheus-exporter`](https://github.com/HarperDB/prometheus-exporter)
+- [`@harperdb/acl-connect`](https://github.com/HarperDB/acl-connect)
+
+## Custom Extensions
+
+- [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs)
+- [`@harperdb/apollo`](https://github.com/HarperDB/apollo)
+- [`@harperdb/astro`](https://github.com/HarperDB/astro)
diff --git a/docs/technical-details/reference/components/plugins.md b/docs/technical-details/reference/components/plugins.md
index 97b0ddd1..8bfbdd25 100644
--- a/docs/technical-details/reference/components/plugins.md
+++ b/docs/technical-details/reference/components/plugins.md
@@ -1,3 +1,7 @@
+---
+title: Experimental Plugins
+---
+
# Experimental Plugins

The new, experimental **plugin** API is an iteration of the existing extension system. It simplifies the API by removing the need for multiple methods (`start`, `startOnMainThread`, `handleFile`, `setupFile`, etc.) and instead only requires a single `handleApplication` method. Plugins are designed to be more extensible and easier to use, and they are intended to replace the concept of extensions in the future.
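To get a feel for the shape of a plugin before diving into the API, here is a minimal sketch of a plugin module; it does nothing but log, using the scoped logger described later on this page:

```js
// plugin.js - a minimal plugin module
export function handleApplication(scope) {
	// scope provides options, entry handling, the server API, and a scoped logger
	scope.logger.info('plugin loaded');
}
```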
@@ -6,7 +10,7 @@ Similar to the existing extension API, a plugin must specify an `pluginModule` o

If the plugin is being written in something other than JavaScript (such as TypeScript), ensure that the path resolves to the built version, (i.e. `pluginModule: ./dist/index.js`)

-It is also recommended that all extensions have a `package.json` that specifies JavaScript package metadata such as name, version, type, etc. Since plugins are just JavaScript packages, they can do anything a JavaScript package can normally do. It can be written in TypeScript, and compiled to JavaScript. It can export an executable (using the [bin](https://docs.npmjs.com/cli/configuring-npm/package-json#bin) property). It can be published to npm. The possibilities are endless!
+It is also recommended that all extensions have a `package.json` that specifies JavaScript package metadata such as name, version, type, etc. Since plugins are just JavaScript packages, they can do anything a JavaScript package can normally do. It can be written in TypeScript, and compiled to JavaScript. It can export an executable (using the [bin](https://docs.npmjs.com/cli/configuring-npm/package-json#bin) property). It can be published to npm. The possibilities are endless!

The key to a plugin is the [`handleApplication()`](#function-handleapplicationscope-scope-void--promisevoid) method. It must be exported by the `pluginModule`, and cannot coexist with any of the other extension methods such as `start`, `handleFile`, etc. The component loader will throw an error if both are defined.

@@ -41,7 +45,7 @@ The `urlPath` option is a base URL path that is prepended to the resolved `files

- It **cannot** contain `..`.
- If it starts with `./` or is just `.`, the name of the plugin will be automatically prepended to it.

-Putting this all together, to configure the [static](./built-in-extensions.md#static) built-in extension to serve files from the `web` directory but at the `/static/` path, the `config.yaml` would look like this:
+Putting this all together, to configure the [static](./built-in-extensions#static) built-in extension to serve files from the `web` directory but at the `/static/` path, the `config.yaml` would look like this:

```yaml
static:
@@ -51,7 +55,7 @@ static:

Keep in mind the `urlPath` option is completely optional.

-As another example, to configure the [graphqlSchema](./built-in-extensions.md#graphqlschema) built-in extension to serve only `*.graphql` files from within the top-level of the `src/schema` directory, the `config.yaml` would look like this:
+As another example, to configure the [graphqlSchema](./built-in-extensions#graphqlschema) built-in extension to serve only `*.graphql` files from within the top-level of the `src/schema` directory, the `config.yaml` would look like this:

```yaml
graphqlSchema:
@@ -69,7 +73,7 @@ static:
  ignore: 'web/images/**'
```

-> If you're transitioning from the [extension](./extensions.md) system, the `files` option object no longer supports an `only` field. Instead, use the `entryEvent.entryType` or the specific `entryEvent.eventType` fields in [`onEntryEventHandler(entryEvent)`](#function-onentryeventhandlerentryevent-fileentryevent--directoryentryevent-void) method or any of the specific [`EntryHandler`](#class-entryhandler) events.
+> If you're transitioning from the [extension](./extensions) system, the `files` option object no longer supports an `only` field. Instead, use the `entryEvent.entryType` or the specific `entryEvent.eventType` fields in the [`onEntryEventHandler(entryEvent)`](#function-onentryeventhandlerentryevent-fileentryevent--directoryentryevent-void) method or any of the specific [`EntryHandler`](#class-entryhandler) events.
### Timeouts

@@ -80,7 +84,7 @@ The plugin module can export a `defaultTimeout` variable (in milliseconds) that

For example:

```typescript
-export const defaultTimeout = 60_000; // 60 seconds
+export const defaultTimeout = 60_000; // 60 seconds
```

Additionally, users can specify a `timeout` option in their application's `config.yaml` file for a specific plugin. This option takes precedence over the plugin's `defaultTimeout` and the system default.

@@ -98,7 +102,7 @@ customPlugin:

This is a functional example of how the `handleApplication()` method and `scope` argument can be used to create a simple static file server plugin. This example assumes that the component has a `config.yaml` with the `files` option set to a glob pattern that matches the files to be served.

-> This is a simplified form of the [static](./built-in-extensions.md#static) built-in extension.
+> This is a simplified form of the [static](./built-in-extensions#static) built-in extension.

```js
export function handleApplication(scope) {
@@ -106,7 +110,7 @@ export function handleApplication(scope) {

	scope.options.on('change', (key, value, config) => {
		if (key[0] === 'files' || key[0] === 'urlPath') {
-			// If the files or urlPath options change, we need to reinitialize the static files map
+			// If the files or urlPath options change, we need to reinitialize the static files map
			staticFiles.clear();
			logger.info(`Static files reinitialized due to change in ${key.join('.')}`);
		}
@@ -121,11 +125,11 @@ export function handleApplication(scope) {
		switch (entry.eventType) {
			case 'add':
			case 'change':
-				// Store / Update the file contents in memory for serving
+				// Store / Update the file contents in memory for serving
				staticFiles.set(entry.urlPath, entry.contents);
				break;
			case 'unlink':
-				// Remove the file from memory when it is deleted
+				// Remove the file from memory when it is deleted
				staticFiles.delete(entry.urlPath);
				break;
		}
@@ -135,7 +139,7 @@
		(req, next) => {
			if (req.method !== 'GET') return next(req);

-			// Attempt to retrieve the requested static file from memory
+			// Attempt to retrieve the requested static file from memory
			const staticFile = staticFiles.get(req.pathname);

			return staticFile
@@ -169,7 +173,7 @@ This is the only method a plugin module must export. It can be async and is awai

## Class: `Scope`

-- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)
+- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)

### Event: `'close'`

@@ -209,15 +213,15 @@ For example:

```js
export function handleApplication(scope) {
-	// Get the default EntryHandler instance
+	// Get the default EntryHandler instance
	const defaultEntryHandler = scope.handleEntry();

-	// Assign a handler for the 'all' event on the default EntryHandler
+	// Assign a handler for the 'all' event on the default EntryHandler
	scope.handleEntry((entry) => {
		/* ... */
	});

-	// Create a new EntryHandler for the 'src/**/*.js' files option with a custom `'all'` event handler.
+	// Create a new EntryHandler for the 'src/**/*.js' files option with a custom `'all'` event handler.
	const customEntryHandler = scope.handleEntry(
		{
			files: 'src/**/*.js',
@@ -227,7 +231,7 @@
		}
	);

-	// Create another custom EntryHandler for the 'src/**/*.ts' files option, but without a `'all'` event handler.
+	// Create another custom EntryHandler for the 'src/**/*.ts' files option, but without an `'all'` event handler.
	const anotherCustomEntryHandler = scope.handleEntry({
		files: 'src/**/*.ts',
	});
@@ -253,11 +257,11 @@ This method is called automatically by the `scope` instance if the user has not

### `scope.resources`

-Returns: `Map` - A map of the currently loaded [Resource](../globals.md#resource) instances.
+Returns: `Map` - A map of the currently loaded [Resource](../globals#resource) instances.

### `scope.server`

-Returns: `server` - A reference to the [server](../globals.md#server) global API.
+Returns: `server` - A reference to the [server](../globals#server) global API.

### `scope.options`

@@ -276,7 +280,7 @@ And has the following `handleApplication(scope)` implementation:

export function handleApplication(scope) {
	scope.options.on('change', (key, value, config) => {
		if (key[0] === 'files') {
-			// Handle the change in the files option
+			// Handle the change in the files option
			scope.logger.info(`Files option changed to: ${value}`);
		}
	});
@@ -291,7 +295,7 @@ Files option changed to: bar.js

### `scope.logger`

-Returns: `logger` - A scoped instance of the [`logger`](../globals.md#logger) class that provides logging capabilities for the plugin.
+Returns: `logger` - A scoped instance of the [`logger`](../globals#logger) class that provides logging capabilities for the plugin.

It is recommended to use this instead of the `logger` global.

@@ -319,7 +323,7 @@ Returns: `string` - The directory of the application. This is the root directory

## Class: `OptionsWatcher`

-- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)
+- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)

### Event: `'change'`

@@ -344,9 +348,9 @@ For example, if the `files` option for `customPlugin` is changed to `web/**/*.js

```js
scope.options.on('change', (key, value, config) => {
-	key; // ['files']
-	value; // 'web/**/*.js'
-	config; // { files: 'web/**/*.js' }
+	key; // ['files']
+	value; // 'web/**/*.js'
+	config; // { files: 'web/**/*.js' }
});
```

@@ -410,7 +414,7 @@ Any valid configuration value type. Essentially, the primitive types, an array o

## Class: `EntryHandler`

-Extends: [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)
+Extends: [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)

Created by calling [`scope.handleEntry()`](#scopehandleentry) method.

@@ -427,19 +431,19 @@ async function handleApplication(scope) {
	scope.handleEntry((entry) => {
		switch (entry.eventType) {
			case 'add':
-				// Handle file addition
+				// Handle file addition
				break;
			case 'change':
-				// Handle file change
+				// Handle file change
				break;
			case 'unlink':
-				// Handle file deletion
+				// Handle file deletion
				break;
			case 'addDir':
-				// Handle directory addition
+				// Handle directory addition
				break;
			case 'unlinkDir':
-				// Handle directory deletion
+				// Handle directory deletion
				break;
		}
	});
@@ -518,7 +522,7 @@ This method returns a promise associated with the ready event of the updated han

### Interface: `BaseEntry`

-- **stats** - [`fs.Stats`](https://nodejs.org/docs/latest/api/fs.html#class-fsstats) | `undefined` - The file system stats for the entry.
@@ -319,7 +323,7 @@ Returns: `string` - The directory of the application. This is the root directory
## Class: `OptionsWatcher`

-- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)
+- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)

### Event: `'change'`

@@ -344,9 +348,9 @@ For example, if the `files` option for `customPlugin` is changed to `web/**/*.js
```js
scope.options.on('change', (key, value, config) => {
-	key; // ['files']
-	value; // 'web/**/*.js'
-	config; // { files: 'web/**/*.js' }
+	key; // ['files']
+	value; // 'web/**/*.js'
+	config; // { files: 'web/**/*.js' }
});
```

@@ -410,7 +414,7 @@ Any valid configuration value type. Essentially, the primitive types, an array o
## Class: `EntryHandler`

-Extends: [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)
+Extends: [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)

Created by calling [`scope.handleEntry()`](#scopehandleentry) method.

@@ -427,19 +431,19 @@ async function handleApplication(scope) {
	scope.handleEntry((entry) => {
		switch (entry.eventType) {
			case 'add':
-				// Handle file addition
+				// Handle file addition
				break;
			case 'change':
-				// Handle file change
+				// Handle file change
				break;
			case 'unlink':
-				// Handle file deletion
+				// Handle file deletion
				break;
			case 'addDir':
-				// Handle directory addition
+				// Handle directory addition
				break;
			case 'unlinkDir':
-				// Handle directory deletion
+				// Handle directory deletion
				break;
		}
	});
@@ -518,7 +522,7 @@ This method returns a promise associated with the ready event of the updated han
### Interface: `BaseEntry`

-- **stats** - [`fs.Stats`](https://nodejs.org/docs/latest/api/fs.html#class-fsstats) | `undefined` - The file system stats for the entry.
+- **stats** - [`fs.Stats`](https://nodejs.org/docs/latest/api/fs.html#class-fsstats) | `undefined` - The file system stats for the entry.
- **urlPath** - `string` - The recommended URL path of the entry.
- **absolutePath** - `string` - The absolute path of the entry.

diff --git a/docs/technical-details/reference/content-types.md b/docs/technical-details/reference/content-types.md
index bc6ae084..d7567f7f 100644
--- a/docs/technical-details/reference/content-types.md
+++ b/docs/technical-details/reference/content-types.md
@@ -1,3 +1,7 @@
+---
+title: Content Types
+---
+
# Content Types

Harper supports several different content types (or MIME types) for both HTTP request bodies (describing operations) as well as for serializing content into HTTP response bodies. Harper follows HTTP standards for specifying both request body content types and acceptable response body content types. Any of these content types can be used with any of the standard Harper operations.

@@ -22,4 +26,4 @@ MessagePack is another efficient binary format like CBOR, with support for all H
Comma-separated values is an easy to use and understand format that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available.

-In addition, with the REST interface, you can use file-style extensions to indicate an encoding like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest.md) for more information on how to do this.
+In addition, with the REST interface, you can use file-style extensions to indicate an encoding like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest) for more information on how to do this.
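For example, a client could request CSV either through standard content negotiation or through the file-style extension. A minimal sketch (the `Dog` table, host, and port are illustrative assumptions, not part of the documentation above):

```js
// Both requests ask Harper to serialize the response as CSV
const viaHeader = await fetch('http://localhost:9926/Dog/', {
	headers: { Accept: 'text/csv' },
});
const viaExtension = await fetch('http://localhost:9926/Dog.csv');
console.log(await viaHeader.text());
```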
diff --git a/docs/technical-details/reference/data-types.md b/docs/technical-details/reference/data-types.md
index 10c2c59d..39f54bd6 100644
--- a/docs/technical-details/reference/data-types.md
+++ b/docs/technical-details/reference/data-types.md
@@ -1,6 +1,10 @@
+---
+title: Data Types
+---
+
# Data Types

-Harper supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (Harper’s supports all of JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. Harper supports [MessagePack and CBOR](content-types.md), which allows for all of Harper supported data types. [Schema definitions can specify the expected types for fields, with GraphQL Schema Types](../../developers/applications/defining-schemas.md), which are used for validation of incoming typed data (JSON, MessagePack), and is used for auto-conversion of untyped data (CSV, [query parameters](../../developers/rest.md)). Available data types include:
+Harper supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (Harper supports all JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. Harper supports MessagePack and CBOR, which allow for all of Harper's supported data types. [Schema definitions can specify the expected types for fields, with GraphQL Schema Types](../../developers/applications/defining-schemas), which are used for validation of incoming typed data (JSON, MessagePack) and for auto-conversion of untyped data (CSV, [query parameters](../../developers/rest)). Available data types include:

(Note that these labels are descriptive, they do not necessarily correspond to the GraphQL schema type names, but the schema type names are noted where possible)

@@ -16,7 +20,7 @@ Strings, or text, are a sequence of any unicode characters and are internally en
Numbers can be stored as signed integers up to a 1000 bits of precision (about 300 digits) or floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. With JSON, numbers are automatically parsed and stored in the most appropriate format. Custom components and applications may use BigInt numbers to store/access integers that are larger than 53-bit. The following GraphQL schema type name are supported:

-- `Float` - Any number that can be represented with [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) ("double")
+- `Float` - Any number that can be represented with [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) ("double")
- `Int` - Any integer between from -2147483648 to 2147483647
- `Long` - Any integer between from -9007199254740992 to 9007199254740992
- `BigInt` - Any integer (negative or positive) with less than 300 digits

@@ -49,7 +53,7 @@ JSON doesn’t have any support for encoding binary data, but MessagePack and CB
### Blobs

-Binary data can also be stored with [`Blob`s](blob.md), which can scale much better for larger content than `Bytes`, as it is designed to be streamed and does not need to be held entirely in memory. It is recommended that `Blob`s are used for content larger than 20KB.
+Binary data can also be stored with [`Blob`s](blob), which can scale much better for larger content than `Bytes`, as it is designed to be streamed and does not need to be held entirely in memory. It is recommended that `Blob`s are used for content larger than 20KB.

## Explicit Map/Set

diff --git a/docs/technical-details/reference/dynamic-schema.md b/docs/technical-details/reference/dynamic-schema.md
index 32a7faf2..740320a1 100644
--- a/docs/technical-details/reference/dynamic-schema.md
+++ b/docs/technical-details/reference/dynamic-schema.md
@@ -1,3 +1,7 @@
+---
+title: Dynamic Schema
+---
+
# Dynamic Schema

When tables are created without any schema, through the operations API (without specifying attributes) or studio, the tables follow "dynamic-schema" behavior. Generally it is best-practice to define schemas for your tables to ensure predictable, consistent structures with data integrity and precise control over indexing, without dependency on data itself. However, it can often be simpler and quicker to simply create a table and let the data auto-generate the schema dynamically with everything being auto-indexed for broad querying.

@@ -17,7 +21,7 @@ Harper tables group records together with a common data pattern. To create a tab
## Primary Key

-The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [Harper Storage Algorithm](storage-algorithm.md) utilizes this value for indexing.
+The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [Harper Storage Algorithm](storage-algorithm) utilizes this value for indexing.
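As a sketch of that auto-generated key behavior (using the `server.operation()` global covered in the globals reference; the table and the exact response shape are illustrative assumptions):

```js
// Insert a record without supplying a primary key
const result = await server.operation({
	operation: 'insert',
	database: 'dev',
	table: 'dog',
	records: [{ dog_name: 'Penny', owner_name: 'Kyle' }],
});
// The generated GUID is returned to the caller, e.g. ['383c0bae-...']
console.log(result.inserted_hashes);
```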
**Standard Attributes**

@@ -27,12 +31,12 @@ With tables that are using dynamic schemas, additional attributes are reflexivel
Harper automatically creates two audit attributes used on each record if the table is created without a schema.

-- `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
-- `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+- `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+- `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.

### Dynamic Schema Example

-To better understand the behavior let’s take a look at an example. This example utilizes [Harper API operations](../../developers/operations-api/databases-and-tables.md).
+To better understand the behavior let’s take a look at an example. This example utilizes [Harper API operations](../../developers/operations-api/databases-and-tables).

**Create a Database**

@@ -60,7 +64,7 @@ At this point the table does not have structure beyond what we provided, so the

**dev.dog**

-![](../../../images/reference/dynamic_schema_2_create_table.png.webp)
+![](/reference/dynamic_schema_2_create_table.png.webp)

**Insert Record**

@@ -81,7 +85,7 @@ With a single record inserted and new attributes defined, our table now looks li

**dev.dog**

-![](../../../images/reference/dynamic_schema_3_insert_record.png.webp)
+![](/reference/dynamic_schema_3_insert_record.png.webp)

Indexes have been automatically created for `dog_name` and `owner_name` attributes.

@@ -105,7 +109,7 @@ In this case, there is no change to the schema. Our table now looks like this:

**dev.dog**

-![](../../../images/reference/dynamic_schema_4_insert_additional_record.png.webp)
+![](/reference/dynamic_schema_4_insert_additional_record.png.webp)

**Update Existing Record**

@@ -126,7 +130,7 @@ Now we have a new attribute called `weight_lbs`. Our table now looks like this:

**dev.dog**

-![](../../../images/reference/dynamic_schema_5_update_existing_record.png.webp)
+![](/reference/dynamic_schema_5_update_existing_record.png.webp)

**Query Table with SQL**

@@ -141,4 +145,4 @@ Now if we query for all records where `weight_lbs` is `null` we expect to get ba
This results in the expected two records being returned.

-![](../../../images/reference/dynamic_schema_6_query_table_with_sql.png.webp)
+![](/reference/dynamic_schema_6_query_table_with_sql.png.webp)

diff --git a/docs/technical-details/reference/globals.md b/docs/technical-details/reference/globals.md
index 7910dc4a..8e3a64f7 100644
--- a/docs/technical-details/reference/globals.md
+++ b/docs/technical-details/reference/globals.md
@@ -1,3 +1,7 @@
+---
+title: Globals
+---
+
# Globals

The primary way that JavaScript code can interact with Harper is through the global variables, which has several objects and classes that provide access to the tables, server hooks, and resources that Harper provides for building applications. As global variables, these can be directly accessed in any module.

@@ -34,7 +38,7 @@ async function getRecord() {
}
```

-It is recommended that you [define a database](../../developers/applications/defining-schemas.md) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object.
+It is recommended that you [define a database](../../developers/applications/defining-schemas) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object.

## `databases`

@@ -47,7 +51,7 @@ const { Dog } = databases.dev;
## `Resource`

-This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](resources/README.md) for more details about implementing a Resource class.
+This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](resources/) for more details about implementing a Resource class.

## `auth(username, password?): Promise`

@@ -55,7 +59,7 @@ This returns the user object with permissions/authorization information based on
## `logger`

-This provides methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/logging.md) for more information.
+This provides methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/standard-logging) for more information.
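A minimal sketch of these levels in use (the message text is illustrative):

```js
// Each method corresponds to a log level
logger.debug('resolving record from cache');
logger.warn('cache miss; falling back to origin');
logger.error('origin request failed');
```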
## `server`

@@ -77,7 +81,7 @@ server.http(
		return request.url === '/graphql' ? handleGraphQLRequest(request) : next(request);
	},
	{
-		runFirst: true, // run this handler first
+		runFirst: true, // run this handler first
	}
);
```

@@ -90,18 +94,18 @@ The HTTP request listener to be added to the middleware chain. To continue chain
### `Request` and `Response`

-The `Request` and `Response` classes are based on the WHATWG APIs for the [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) classes. Requests and responses are based on these standard-based APIs to facilitate reuse with modern web code. While Node.js' HTTP APIs are powerful low-level APIs, the `Request`/`Response` APIs provide excellent composability characteristics, well suited for layered middleware and for clean mapping to [RESTful method handlers](./resources/README.md) with promise-based responses, as well as interoperability with other standards-based APIs like [streams](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) used with [`Blob`s](https://developer.mozilla.org/en-US/docs/Web/API/Blob). However, the Harper implementation of these classes is not a direct implementation of the WHATWG APIs, but implements additional/distinct properties for the the Harper server environment:
+The `Request` and `Response` classes are based on the WHATWG APIs for the [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) classes. Requests and responses are based on these standard-based APIs to facilitate reuse with modern web code. While Node.js' HTTP APIs are powerful low-level APIs, the `Request`/`Response` APIs provide excellent composability characteristics, well suited for layered middleware and for clean mapping to [RESTful method handlers](./resources/) with promise-based responses, as well as interoperability with other standards-based APIs like [streams](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) used with [`Blob`s](https://developer.mozilla.org/en-US/docs/Web/API/Blob). However, the Harper implementation of these classes is not a direct implementation of the WHATWG APIs, but implements additional/distinct properties for the Harper server environment:

#### `Request`

A `Request` object is passed to the direct static REST handlers, and preserved as the context for instance methods, and has the following properties:

- `url` - This is the request target, which is the portion of the URL that was received by the server. If a client sends a request to `http://example.com:8080/path?query=string`, the actual received request is `GET /path?query=string` and the `url` property will be `/path?query=string`.
- `method` - This is the HTTP method of the request. This is a string like `GET`, `POST`, `PUT`, `DELETE`, etc.
- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the request.
- `pathname` - This is the path portion of the URL, without the query string. For example, if the URL is `/path?query=string`, the `pathname` will be `/path`.
- `protocol` - This is the protocol of the request, like `http` or `https`.
-- `data` - This is the deserialized body of the request (based on the type of data specified by [`Content-Type`](./content-types.md) header).
+- `data` - This is the deserialized body of the request (based on the type of data specified by the `Content-Type` header).
- `ip` - This is the remote IP address of the client that made the request (or the remote IP address of the last proxy to connect to Harper).
- `host` - This is the host of the request, like `example.com`.
- `sendEarlyHints(link: string, headers?: object): void` - This method sends an early hints response to the client, prior to actually returning a response. This is useful for sending a link header to the client to indicate that another resource should be preloaded. The `headers` argument can be used to send additional headers with the early hints response, in addition to the `link`. This is generally most helpful in a cache resolution function, where you can send hints _if_ the data is not in the cache and is resolving from an origin:

@@ -109,7 +113,7 @@
```javascript
class Origin {
	async get(request) {
-		// if we are fetching data from origin, send early hints
+		// if we are fetching data from origin, send early hints
		this.getContext().requestContext.sendEarlyHints('');
		let response = await fetch(request);
		...
@@ -120,17 +124,17 @@ Cache.sourcedFrom(Origin);
- `login(username, password): Promise` - This method can be called to start an authenticated session. The login will authenticate the user by username and password. If the authentication was successful, a session will be created and a cookie will be set on the response header that references the session. All subsequent requests from the client that sends the cookie in requests will be authenticated as the user that logged in and the session record will be attached to the request. This method returns a promise that resolves when the login is successful, and rejects if the login is unsuccessful.
- `session` - This is the session object that is associated with current cookie-maintained session. This object is used to store session data for the current session. This is `Table` record instance, and can be updated by calling `request.session.update({ key: value })` or session can be retrieved with `request.session.get()`. If the cookie has not been set yet, a cookie will be set the first time a session is updated or a login occurs.
-- `_nodeRequest` - This is the underlying Node.js [`http.IncomingMessage`](https://nodejs.org/api/http.html#http_class_http_incomingmessage) object. This can be used to access the raw request data, such as the raw headers, raw body, etc. However, this is discouraged and should be used with caution since it will likely break any other server handlers that depends on the layered `Request` call with `Response` return pattern.
-- `_nodeResponse` - This is the underlying Node.js [`http.ServerResponse`](https://nodejs.org/api/http.html#http_class_http_serverresponse) object. This can be used to access the raw response data, such as the raw headers. Again, this is discouraged and can cause problems for middleware, should only be used if you are certain that other server handlers will not attempt to return a different `Response` object.
+- `_nodeRequest` - This is the underlying Node.js [`http.IncomingMessage`](https://nodejs.org/api/http.html#http_class_http_incomingmessage) object. This can be used to access the raw request data, such as the raw headers, raw body, etc. However, this is discouraged and should be used with caution since it will likely break any other server handlers that depend on the layered `Request` call with `Response` return pattern.
+- `_nodeResponse` - This is the underlying Node.js [`http.ServerResponse`](https://nodejs.org/api/http.html#http_class_http_serverresponse) object. This can be used to access the raw response data, such as the raw headers. Again, this is discouraged and can cause problems for middleware, and should only be used if you are certain that other server handlers will not attempt to return a different `Response` object.

#### `Response`

REST methods can directly return data that is serialized and returned to users, or it can return a `Response` object (or a promise to a `Response`), or it can return a `Response`-like object with the following properties (or again, a promise to it):

- `status` - This is the HTTP status code of the response. This is a number like `200`, `404`, `500`, etc.
-- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the response.
-- `data` - This is the data to be returned of the response. This will be serialized using Harper's [content negotiation](./content-types.md).
-- `body` - Alternately (to `data`), the raw body can be returned as a `Buffer`, string, stream (Node.js or [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)), or a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the response.
+- `data` - This is the data to be returned in the response. This will be serialized using Harper's content negotiation.
+- `body` - Alternately (to `data`), the raw body can be returned as a `Buffer`, string, stream (Node.js or [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)), or a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
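For instance, a REST handler could return such a `Response`-like object directly (a sketch; the resource name and header values are illustrative assumptions):

```js
export class Greeting extends Resource {
	static loadAsInstance = false;
	get(target) {
		return {
			status: 200,
			headers: new Headers({ 'Cache-Control': 'no-store' }),
			data: { message: 'hello' }, // serialized via content negotiation
		};
	}
}
```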
#### `HttpOptions`

@@ -144,7 +148,7 @@ Properties:
#### `HttpServer`

-Node.js [`http.Server`](https://nodejs.org/api/http.html#class-httpserver) or [`https.SecureServer`](https://nodejs.org/api/https.html#class-httpsserver) instance.
+Node.js [`http.Server`](https://nodejs.org/api/http.html#class-httpserver) or [`https.SecureServer`](https://nodejs.org/api/https.html#class-httpsserver) instance.

### `server.socket(listener: ConnectionListener, options: SocketOptions): SocketServer`

@@ -154,20 +158,20 @@ Only one socket server will be created. A `securePort` takes precedence.
#### `ConnectionListener`

-Node.js socket server connection listener as documented in [`net.createServer`](https://nodejs.org/api/net.html#netcreateserveroptions-connectionlistener) or [`tls.createServer`](https://nodejs.org/api/tls.html#tlscreateserveroptions-secureconnectionlistener)
+Node.js socket server connection listener as documented in [`net.createServer`](https://nodejs.org/api/net.html#netcreateserveroptions-connectionlistener) or [`tls.createServer`](https://nodejs.org/api/tls.html#tlscreateserveroptions-secureconnectionlistener)

#### `SocketOptions`

-- `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance.
-- `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+- `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance.
+- `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.

#### `SocketServer`

-Node.js [`net.Server`](https://nodejs.org/api/net.html#class-netserver) or [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+Node.js [`net.Server`](https://nodejs.org/api/net.html#class-netserver) or [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
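A sketch of a raw TCP listener built with this API (the port and the echo behavior are illustrative assumptions):

```js
// Echo every received chunk back to the client
server.socket(
	(socket) => {
		socket.on('data', (chunk) => socket.write(chunk));
	},
	{ port: 9000 }
);
```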
### `server.ws(listener: WsListener, options: WsOptions): HttpServer[]`

-Add a listener to the WebSocket connection listener middleware chain. The WebSocket server is associated with the HTTP server specified by the `options.port` or `options.securePort`. Use the [`server.upgrade()`](globals.md#serverupgradelistener-upgradelistener-options-upgradeoptions-void) method to add a listener to the upgrade middleware chain.
+Add a listener to the WebSocket connection listener middleware chain. The WebSocket server is associated with the HTTP server specified by the `options.port` or `options.securePort`. Use the [`server.upgrade()`](globals#serverupgradelistener-upgradelistener-options-upgradeoptions-void) method to add a listener to the upgrade middleware chain.

Example:

@@ -191,8 +195,8 @@ Type: `(ws: WebSocket, request: Request, chainCompletion: ChainCompletion, next:
The WebSocket connection listener.

-- The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module.
-- The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server.
+- The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module.
+- The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server.
- The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket.
- The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one such as: `next(ws, request, chainCompletion)`

@@ -209,13 +213,13 @@ Properties:
### `server.upgrade(listener: UpgradeListener, options: UpgradeOptions): void`

-Add a listener to the HTTP Server [upgrade](https://nodejs.org/api/http.html#event-upgrade_1) event. If a WebSocket connection listener is added using [`server.ws()`](globals.md#serverwslistener-wslistener-options-wsoptions-httpserver), a default upgrade handler will be added as well. The default upgrade handler will add a `__harperdb_request_upgraded` boolean to the `request` argument to signal the connection has already been upgraded. It will also check for this boolean _before_ upgrading and if it is `true`, it will pass the arguments along to the `next` listener.
+Add a listener to the HTTP Server [upgrade](https://nodejs.org/api/http.html#event-upgrade_1) event. If a WebSocket connection listener is added using [`server.ws()`](globals#serverwslistener-wslistener-options-wsoptions-httpserver), a default upgrade handler will be added as well. The default upgrade handler will add a `__harperdb_request_upgraded` boolean to the `request` argument to signal the connection has already been upgraded. It will also check for this boolean _before_ upgrading and if it is `true`, it will pass the arguments along to the `next` listener.

This method should be used to delegate HTTP upgrade events to an external WebSocket server instance.

Example:

-> This example is from the Harper Next.js component. See the complete source code [here](https://github.com/HarperDB/nextjs/blob/main/extension.js)
+> This example is from the Harper Next.js component. See the complete source code [here](https://github.com/HarperDB/nextjs/blob/main/extension.js)

```js
server.upgrade(
@@ -238,7 +242,7 @@ server.upgrade(
#### `UpgradeListener`

Type: `(request, socket, head, next) => void`

-The arguments are passed to the middleware chain from the HTTP server [`'upgrade'`](https://nodejs.org/api/http.html#event-upgrade_1) event.
+The arguments are passed to the middleware chain from the HTTP server [`'upgrade'`](https://nodejs.org/api/http.html#event-upgrade_1) event.

#### `UpgradeOptions`

@@ -252,11 +256,11 @@ Properties:
### `server.config`

-This provides access to the Harper configuration object. This comes from the [harperdb-config.yaml](../../deployments/configuration.md) (parsed into object form).
+This provides access to the Harper configuration object. This comes from the [harperdb-config.yaml](../../deployments/configuration) (parsed into object form).

### `server.recordAnalytics(value, metric, path?, method?, type?)`

-This records the provided value as a metric into Harper's analytics. Harper efficiently records and tracks these metrics and makes them available through [analytics API](analytics.md). The values are aggregated and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics. For the parameters, make sure you are not grouping on too fine of a level for useful aggregation. The parameters are:
+This records the provided value as a metric into Harper's analytics. Harper efficiently records and tracks these metrics and makes them available through the [analytics API](analytics). The values are aggregated and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics. For the parameters, make sure you are not grouping on too fine of a level for useful aggregation. The parameters are:

- `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example.
- `metric` - This is the name of the metric.

@@ -266,7 +270,7 @@
### `server.getUser(username): Promise`

-This returns the user object with permissions/authorization information based on the provided username. This does not verify the password, so it is generally used for looking up users by username. If you want to verify a user by password, use [`server.authenticateUser`](globals.md#serverauthenticateuserusername-password-user).
+This returns the user object with permissions/authorization information based on the provided username. This does not verify the password, so it is generally used for looking up users by username. If you want to verify a user by password, use [`server.authenticateUser`](globals#serverauthenticateuserusername-password-user).

### `server.authenticateUser(username, password): Promise`

@@ -284,7 +288,7 @@ Register a resource with the server. For example:
```
class NewResource extends Resource { }
server.resources.set('NewResource', Resource);
-// or limit usage:
+// or limit usage:
server.resources.set('NewResource', Resource, { rest: true, mqtt: false, 'my-protocol': true });
```

@@ -294,13 +298,13 @@ Find a resource that matches the path. For example:
```
server.resources.getMatch('/NewResource/some-id');
-// or specify the export/protocol type, to allow it to be limited:
+// or specify the export/protocol type, to allow it to be limited:
server.resources.getMatch('/NewResource/some-id', 'my-protocol');
```

### `server.operation(operation: Object, context?: Object, authorize?: boolean)`

-Execute an operation from the [Operations API](https://docs.harperdb.io/docs/developers/operations-api)
+Execute an operation from the [Operations API](https://docs.harperdb.io/developers/operations-api)

Parameters:

@@ -308,7 +312,7 @@
- **context** - `Object` - `{ username: string}` - _optional_ - The specified user
- **authorize** - `boolean` - _optional_ - Indicate the operation should authorize the user or not. Defaults to `false`

-Returns a `Promise` with the operation's response as per the [Operations API documentation](https://docs.harperdb.io/docs/developers/operations-api).
+Returns a `Promise` with the operation's response as per the [Operations API documentation](https://docs.harperdb.io/developers/operations-api).
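For example (a sketch; the user, database, and operation are illustrative assumptions):

```js
const description = await server.operation(
	{ operation: 'describe_table', database: 'dev', table: 'dog' },
	{ username: 'admin' },
	true // check that this user is authorized for the operation
);
```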
### `server.nodes`

diff --git a/docs/technical-details/reference/graphql.md b/docs/technical-details/reference/graphql.md
index 4a2e3042..edcc723b 100644
--- a/docs/technical-details/reference/graphql.md
+++ b/docs/technical-details/reference/graphql.md
@@ -1,12 +1,16 @@
+---
+title: GraphQL Querying
+---
+
# GraphQL Querying

-Harper supports GraphQL in a variety of ways. It can be used for [defining schemas](../../developers/applications/defining-schemas.md), and for querying [Resources](./resources/README.md).
+Harper supports GraphQL in a variety of ways. It can be used for [defining schemas](../../developers/applications/defining-schemas), and for querying [Resources](./resources/).

Get started by setting `graphql: true` in `config.yaml`. This automatically enables a `/graphql` endpoint that can be used for GraphQL queries.

-> Harper's GraphQL component is inspired by the [GraphQL Over HTTP](https://graphql.github.io/graphql-over-http/draft/#) specification; however, it does not fully implement neither that specification nor the [GraphQL](https://spec.graphql.org/) specification.
+> Harper's GraphQL component is inspired by the [GraphQL Over HTTP](https://graphql.github.io/graphql-over-http/draft/#) specification; however, it does not fully implement either that specification or the [GraphQL](https://spec.graphql.org/) specification.

Queries can either be `GET` or `POST` requests, and both follow essentially the same request format. `GET` requests must use search parameters, and `POST` requests use the request body.

@@ -42,7 +46,7 @@ Accept: application/graphql-response+json
> Tip: For the best user experience, include the `Accept: application/graphql-response+json` header in your request. This provides better status codes for errors.

-The Harper GraphQL querying system is strictly limited to exported Harper Resources. For many users, this will typically be a table that uses the `@exported` directive in its schema. Queries can only specify Harper Resources and their attributes in the selection set. Queries can filter using [arguments](https://graphql.org/learn/queries/#arguments) on the top-level Resource field. Harper provides a short form pattern for simple queries, and a long form pattern based off of the [Resource Query API](./resources/README.md#query) for more complex queries.
+The Harper GraphQL querying system is strictly limited to exported Harper Resources. For many users, this will typically be a table that uses the `@exported` directive in its schema. Queries can only specify Harper Resources and their attributes in the selection set. Queries can filter using [arguments](https://graphql.org/learn/queries/#arguments) on the top-level Resource field. Harper provides a short form pattern for simple queries, and a long form pattern based off of the [Resource Query API](./resources/#query) for more complex queries.

Unlike REST queries, GraphQL queries can specify multiple resources simultaneously:

@@ -75,11 +79,11 @@ GET /Owner/?select(id,name,occupation)
There are three request parameters for GraphQL queries: `query`, `operationName`, and `variables`

1. `query` - _Required_ - The string representation of the GraphQL document.
-   1. Limited to [Executable Definitions](https://spec.graphql.org/October2021/#ExecutableDefinition) only.
-   2. i.e. GraphQL [`query`](https://graphql.org/learn/queries/#fields) or `mutation` (coming soon) operations, and [fragments](https://graphql.org/learn/queries/#fragments).
-   3. If an shorthand, unnamed, or singular named query is provided, they will be executed by default. Otherwise, if there are multiple queries, the `operationName` parameter must be used.
-2. `operationName` - _Optional_ - The name of the query operation to execute if multiple queries are provided in the `query` parameter
-3. `variables` - _Optional_ - A map of variable values to be used for the specified query
+   1. Limited to [Executable Definitions](https://spec.graphql.org/October2021/#executabledefinition) only.
+   1. i.e. GraphQL [`query`](https://graphql.org/learn/queries/#fields) or `mutation` (coming soon) operations, and [fragments](https://graphql.org/learn/queries/#fragments).
+   1. If a shorthand, unnamed, or singular named query is provided, it will be executed by default. Otherwise, if there are multiple queries, the `operationName` parameter must be used.
+1. `operationName` - _Optional_ - The name of the query operation to execute if multiple queries are provided in the `query` parameter
+1. `variables` - _Optional_ - A map of variable values to be used for the specified query
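Putting the three parameters together, a client request might look like the following sketch (the port, the exported `Dog` table, and its fields are illustrative assumptions):

```js
const response = await fetch('http://localhost:9926/graphql', {
	method: 'POST',
	headers: {
		'Content-Type': 'application/json',
		Accept: 'application/graphql-response+json',
	},
	body: JSON.stringify({
		query: 'query DogById($id: ID!) { Dog(id: $id) { id name } }',
		operationName: 'DogById',
		variables: { id: '1' },
	}),
});
const { data } = await response.json();
```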
### Type Checking

diff --git a/docs/technical-details/reference/headers.md b/docs/technical-details/reference/headers.md
index 431ff477..5c85fc88 100644
--- a/docs/technical-details/reference/headers.md
+++ b/docs/technical-details/reference/headers.md
@@ -1,3 +1,7 @@
+---
+title: Harper Headers
+---
+
# Harper Headers

All Harper API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all Harper API responses:

diff --git a/docs/technical-details/reference/index.md b/docs/technical-details/reference/index.md
new file mode 100644
index 00000000..a6f45f0a
--- /dev/null
+++ b/docs/technical-details/reference/index.md
@@ -0,0 +1,29 @@
+---
+title: Reference
+---
+
+# Reference
+
+This section contains technical details and reference materials for Harper.
+
+- [Analytics](analytics)
+- [Architecture](architecture)
+- [Blob](blob)
+- [Content Types](content-types)
+- [Components](components/)
+  - [Applications](components/applications)
+  - [Built-In Extensions](components/built-in-extensions)
+  - [Configuration](components/configuration)
+  - [Extensions](components/extensions)
+  - [(Experimental) Plugins](components/plugins)
+- [Data Types](data-types)
+- [Dynamic Schema](dynamic-schema)
+- [Globals](globals)
+- [GraphQL](graphql)
+- [Headers](headers)
+- [Limits](limits)
+- [Resources](resources/)
+  - [Migration](resources/migration)
+  - [Instance Binding](resources/instance-binding)
+- [Storage Algorithm](storage-algorithm)
+- [Transactions](transactions)

diff --git a/docs/technical-details/reference/limits.md b/docs/technical-details/reference/limits.md
index f34746e2..97214620 100644
--- a/docs/technical-details/reference/limits.md
+++ b/docs/technical-details/reference/limits.md
@@ -1,3 +1,7 @@
+---
+title: Harper Limits
+---
+
# Harper Limits

This document outlines limitations of Harper.

diff --git a/docs/technical-details/reference/resources/README.md b/docs/technical-details/reference/resources/README.md
deleted file mode 100644
index 65e8b0a4..00000000
--- a/docs/technical-details/reference/resources/README.md
+++ /dev/null
@@ -1,740 +0,0 @@
-# Resource Class
-
-## Resource Class
-
-The Resource class is designed to provide a unified API for modeling different data resources within Harper. Database/table data can be accessed through the Resource API. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users.
-
-Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, that you can interact with through various methods or queries. Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. A Resource instance holds the primary key/identifier, context information, and any pending updates to the record, so any instance methods can act on the record and have full access to this information during execution. Therefore, there are distinct resource instances created for every record or query that is accessed, and the instance methods are used for interaction with the data.
-
-Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. This general rule for how to interact with resources:
-
-- If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`.
-- If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods.
-
-The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to and be used in a similar way to how you would interact with a RESTful API.
-
-The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs that were designed with single-server interactions in mind. Semantics that attempt to guarantee no existing record or overwrite-only behavior require locks that don't scale well in distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed eventually consistent database. You can generally think of CRUD operations mapping to REST operations like this:
-
-- Read - `get`
-- Create with a known primary key - `put`
-- Create with a generated primary key - `post`/`create`
-- Update (Full) - `put`
-- Update (Partial) - `patch`
-- Delete - `delete`
-
-The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URL, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped an instance of MyResource, and will call the instance methods like `get(target)`, `put(target, data)`, and `post(target, data)`, where target is based on the `/3492` part of the path.
-
-It is recommended that you use the latest version (V2) of the Resource API with the legacy instance binding behavior disabled. This is done by setting the static `loadAsInstance` property to `false` on the Resource class. This will become the default behavior in Harper version 5.0. This page is written assuming `loadAsInstance` is set to `false`. If you want to use the legacy instance binding behavior, you can set `loadAsInstance` to `true` on the Resource class. If you have existing code that you want to migrate, please see the [migration guide](./resource-migration.md) for more information.
-
-You can create classes that extend `Resource` to define your own data sources, typically to interface with external data sources (the `Resource` base class is available as a global variable in the Harper JS environment). In doing this, you will generally be extending and providing implementations for the instance methods below. For example:
-
-```javascript
-export class MyExternalData extends Resource {
-	static loadAsInstance = false; // enable the updated API
-	async get(target) {
-		// fetch data from an external source, using our id
-		let response = await this.fetch(target.id);
-		// do something with the response
-	}
-	put(target, data) {
-		// send the data into the external source
-	}
-	delete(target) {
-		// delete an entity in the external data source
-	}
-	subscribe(subscription) {
-		// if the external data source is capable of real-time notification of changes, can subscribe
-	}
-}
-// we can export this class from resources.json as our own endpoint, or use this as the source for
-// a Harper data to store and cache the data coming from this data source:
-tables.MyCache.sourcedFrom(MyExternalData);
-```
-
-You can also extend table classes in the same way, overriding the instance methods for custom functionality. The `tables` object is a global variable in the Harper JavaScript environment, along with `Resource`:
-
-```javascript
-export class MyTable extends tables.MyTable {
-	static loadAsInstance = false; // enable the updated API
-	get(target) {
-		// we can add properties or change properties before returning data:
-		return { ...super.get(target), newProperty: 'newValue', existingProperty: 42 }; // returns the record, with additional properties
-	}
-	put(target, data) {
-		// can change data any way we want
-		super.put(target, data);
-	}
-	delete(target) {
-		super.delete(target);
-	}
-	post(target, data) {
-		// providing a post handler (for HTTP POST requests) is a common way to create additional
-		// actions that aren't well described with just PUT or DELETE
-	}
-}
-```
-
-Make sure that if are extending and `export`ing your table with this class, that you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice.
-
-All Resource methods that are called from HTTP methods may directly return data or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code), to explicitly return specific headers and status code.
-
-## Global Variables
-
-### `tables`
-
-This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API.
-
-### `databases`
-
-This is an object with all the databases that have been defined in Harper (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.
-
-### `Resource`
-
-This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables.
-
-### `server`
-
-This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../components/extensions.md).
-
-### `transaction`
-
-This provides a function for starting transactions. See the transactions section below for more information.
-
-### `contentTypes`
-
-This provides an interface for defining new content type handlers. See the [content type extensions documentation](../content-types.md) for more information.
-
-### TypeScript Support
-
-While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them. This can be done by setting up a package link to the main Harper package in your app:
-
-```
-# you may need to go to your harper directory and set it up as a link first
-npm link harperdb
-```
-
-And then you can import any of the main Harper APIs you will use, and your IDE should understand the full typings associated with them:
-
-```
-import { databases, tables, Resource } from 'harperdb';
-```
-
-## Resource Class (Instance) Methods
-
-### Properties/attributes declared in schema
-
-Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances.
-
-### `get(target: RequestTarget | Id)`: Promise|AsyncIterable
-
-This retrieves a record, or queries for records, and is called by HTTP GET requests. This can be called with a `RequestTarget` which can specify a path/id and query parameters as well as search parameters. For tables, this can also be called directly with an id (string or number) to retrieve a record by id. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. HTTP requests will always call `get` with a full `RequestTarget`. The default `get` method (`super.get(target)`) returns the current record as a plain object.
-
-The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
-
-```javascript
-class extends Resource {
-	static loadAsInstance = false;
-	get(target) {
-		let param1 = target.get('param1'); // returns 'value'
-		let id = target.id; // returns 'some-id'
-		let path = target.pathname; // returns /some-id
-		let fullTarget = target.target; // returns /some-id?param1=value
-		...
-	}
-```
-
-If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return the record identified by the path. If `get` is called on a collection (`/Table/?name=value`), the target will have the `isCollection` property set to `true` and default action is to `search` and return an AsyncIterable of results.
-
-### `search(query: RequestTarget)`: AsyncIterable
-
-This performs a query on this resource or table. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`) it searches through all records in the table. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an `AsyncIterable` of results. The `query` object can be used to specify the desired query.
-
-### `put(target: RequestTarget | Id, data: object): void|Response`
-
-This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(target, data)`) writes the record to the table (updating or inserting depending on if the record previously existed) as part of the current transaction for the resource instance.
-
-The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.
-
-### `patch(target: RequestTarget | Id, data: object): void|Response`
-
-This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(target, data)`) updates the record. The properties will be applied to the existing record, overwriting the existing records properties, and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.
-
-### `update(target: RequestTarget, updates?: object): Updatable`
-
-This can be called to get an Updatable class for updating a record. An `Updatable` instance provides direct access to record properties as properties on `Updatable` instance. The properties can also be modified and any changes are tracked and written to the record when the transaction commits. For example, if we wanted to update the quantify of a product in the Product table, in response to a post, we could write:
-
-```javascript
-class ... {
-	post(target, data) {
-		static loadAsInstance = false;
-		let updatable = this.update(target);
-		updatable.quantity = updatable.quantity - 1;
-	}
-}
-```
-
-In addition, the `Updatable` class has the following methods.
-
-### `Updatable` class
-
-#### `addTo(property, value)`
-
-This adds the provided value to the specified property using conflict-free data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values. We could improve the example above to reliably ensure the quantity is decremented even when it occurs in multiple nodes simultaneously:
-
-```javascript
-class ... {
-	static loadAsInstance = false;
-	post(target, data) {
-		let updatable = this.update(target);
-		updatable.addTo('quantity', -1);
-	}
-}
-```
-
-#### `subtractFrom(property, value)`
-
-This functions exactly the same as `addTo`, except it subtracts the value.
-
-The `Updatable` also inherits the `getUpdatedTime` and `getExpiresAt` methods from the `RecordObject` class.
-
-### `delete(target: RequestTarget): void|Response`
-
-This will delete this record or resource identified by the target, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete(target)`) deletes the record identified by target from the table as part of the current transaction. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.
-
-### `publish(target: RequestTarget, message): void|Response`
-
-This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(target, message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.
-
-### `post(target: RequestTarget, data: object): void|Response`
-
-This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.
-
-### `invalidate(target: RequestTarget)`
-
-This method is available on tables. This will invalidate the specified record in the table. This can be used with a caching table and is used to indicate that the source data has changed, and the record needs to be reloaded when next accessed.
-
-### `subscribe(subscriptionRequest: SubscriptionRequest): Promise`
-
-This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.publish(message)`) will set up a listener that will be called for any changes or published messages to this resource.
-
-The returned (promise resolves to) Subscription object is an `AsyncIterable` that you can use a `for await` to iterate through. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time, previous count, or there is a message for the current or "retained" record, these may be immediately returned).
-
-The `SubscriptionRequest` object supports the following properties (all optional):
-
-- `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`).
-- `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
-- `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` can not be used in conjunction with `startTime`.
-- `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.
-
-### `connect(target: RequestTarget, incomingMessages?: AsyncIterable): AsyncIterable`
-
This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client. - -### `getUpdatedTime(): number` - -This returns the last updated time of the resource (timestamp of last commit). This is returned as milliseconds from epoch. - -### `wasLoadedFromSource(): boolean` - -Indicates if the record was loaded from the source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or had to wait on an inflight request from the source to finish). - -### `getContext(): Context` - -Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action. - -#### `Context` - -The `Context` object has the following (potential) properties: - -- `user` - This is the user object, which includes information about the username, role, and authorizations. -- `transaction` - The current transaction. - -If the current method was triggered by an HTTP request, the following properties are available: - -- `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated. - -When a resource gets a request through HTTP, the request object is the context, which has the following properties: - -- `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string). -- `method` - The method of the HTTP request. -- `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`. -- `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`. -- `pathname` - This provides the path part of the URL (no querystring). -- `host` - This provides the host name of the request (from the `Host` header). -- `ip` - This provides the ip address of the client that made the request. -- `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body. -- `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`). - -When a resource is accessed as a data source: - -- `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and relying on this first request context may not be representative of future requests. 
Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information. - -### `operation(operationObject: Object, authorize?: boolean): Promise` - -This method is available on tables and will execute a Harper operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../../developers/operations-api/README.md) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized). - -### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean` - -For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value. - -## Resource Static Methods and Properties - -The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way of interacting with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on. - -The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a `static search()` method for specifically handling searching a table with query parameters. By default, the Resource static methods create an instance bound to the record specified by the arguments, and call the instance methods. Again, static methods are generally the preferred way to interact with resources from application code. These methods are available on all user Resource classes and tables. - -### `get(target: RequestTarget|Id, context?: Resource|Context)` - -This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do: - -```javascript -const { MyTable, Comment } = tables; -... -// in class: - async get() { - for (let commentId of this.commentIds) { - let comment = await Comment.get(commentId, this); - // now you can do something with the comment record - } - } -``` - -Type definition for `Id`: - -```typescript -type Id = string | number | (string | number)[]; -``` - -### `get(query: Query, context?: Resource|Context)` - -This can be used to retrieve a resource instance by a query. 
The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`: - -```javascript -MyTable.get({ id: 34, select: ['name', 'age'] }); -``` - -This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above. - -### `put(target: RequestTarget|Id, record: object, context?: Resource|Context): Promise` - -This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same `id` (primary key). - -### `put(record: object, context?: Resource|Context): Promise` - -This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same primary key provided in the record. If your table doesn't have a primary key attribute, you will need to use the method with the `id` argument. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. - -### `create(record: object, context?: Resource|Context): Promise` - -This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not include a primary key, and the new record will use the generated primary key. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. - -### `post(target: RequestTarget|Id, data: object, context?: Resource|Context): Promise|any` - -This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged. - -### `patch(target: RequestTarget|Id, recordUpdate: object, context?: Resource|Context): Promise|void` - -This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties, and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. - -### `delete(target: RequestTarget|Id, context?: Resource|Context): Promise|void` - -Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. - -### `publish(target: RequestTarget|Id, message: object, context?: Resource|Context): Promise|void` - -Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. - -### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise` - -Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this. - -### `search(query: RequestTarget, context?: Resource|Context): AsyncIterable` - -This will perform a query on this table or collection. The query parameter can be used to specify the desired query. 
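Putting several of these static methods together, application code might look like this (a minimal sketch; the `Product` table and its attributes are assumptions for illustration):

```javascript
const { Product } = tables;
// create a record with a generated primary key
let created = await Product.create({ name: 'Widget', price: 9.99 });
let id = created[Product.primaryKey];
// partially update the record, preserving any unspecified properties
await Product.patch(id, { price: 8.99 });
// delete the record
await Product.delete(id);
```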
- -### `setComputedAttribute(name: string, computeFunction: (record: object) => any)` - -This will define the function to use for a computed attribute. To use this, the attribute must be defined in the schema as a computed attribute. The `computeFunction` will be called with the record as an argument and should return the computed value for the attribute. For example: - -```javascript -MyTable.setComputedAttribute('computedAttribute', (record) => { - return record.attribute1 + record.attribute2; -}); -``` - -For a schema like: - -```graphql -type MyTable @table { - id: ID @primaryKey - attribute1: Int - attribute2: Int - computedAttribute: Int @computed -} -``` - -See the [schema documentation](../../developers/applications/defining-schemas.md) for more information on computed attributes. - -### `primaryKey` - -This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example: - -```javascript -let record34 = await Table.get(34); -record34[Table.primaryKey]; // -> 34 -``` - -There are additional methods that are only available on table classes (which are a type of resource). - -### `Table.sourcedFrom(Resource, options)` - -This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`) and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `expiration` option configures the table with a time-to-live expiration window for automatic deletion or invalidation of older entries. The `options` parameter supports: - -- `expiration` - Default expiration time for records in seconds. -- `eviction` - Eviction time for records in seconds. -- `scanInterval` - Time period for scanning the table for records to evict. - -If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data). - -### `directURLMapping` - -This property can be set to force the direct URL request target to be mapped to the resource primary key. Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example: - -```javascript -export class MyTable extends tables.MyTable { - static directURLMapping = true; -} -``` - -```http request -GET /MyTable/test?foo=bar -``` - -This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource. - -### `getRecordCount({ exactCount: boolean })` - -This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. 
If this was not an exact count, it will also include an `estimatedRange` array with the estimated range of the count. - -### `parsePath(path, context, query)` - -This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will parse `.property` suffixes for accessing properties and specifying preferred content type in the URL (and for older tables it will convert a multi-segment path to a multipart array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simple path-to-id preservation: - -```javascript - static parsePath(path) { - return path; // return the path as the id - } -``` - -### `isCollection(resource: Resource): boolean` - -This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity. - -### Context and Transactions - -Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation. - -This also allows timestamps that are accessed during resolution to be used to determine the overall last updated timestamp, which informs the header timestamps (which facilitates accurate client-side caching). The context also maintains user, session, and request metadata information that is communicated so that contextual request information (like headers) can be accessed, any writes are properly attributed to the correct user, and any additional security checks can be applied to the user. - -When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the context argument (second argument) to the static methods. - -For example, suppose we had a method to post a comment on a blog post, and when this happens we want to add the comment to a separate comment table and also update an array of comment IDs on the blog record. We might do this: - -```javascript -const { Comment } = tables; - -export class BlogPost extends tables.BlogPost { - async post(comment) { - // add a comment record to the comment table, using this resource as the source for the context - await Comment.put(comment, this); - this.comments.push(comment.id); // add the id for the record to our array of comment ids - // Both of these actions will be committed atomically as part of the same transaction - } -} -``` - -Please see the [transaction documentation](../transactions.md) for more information on how transactions work in Harper. - -### Query - -The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional: - -#### `conditions` - -This is an array of objects that specify the conditions to use to match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties: - -- `attribute`: Name of the property/attribute to match on. -- `value`: The value to match. -- `comparator`: This can specify how the value is compared. 
This defaults to "equals", but can also be "greater_than", "greater_than_equal", "less_than", "less_than_equal", "starts_with", "contains", "ends_with", "between", and "not_equal". -- `conditions`: An array of conditions, which follows the same structure as above. -- `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`. This is optional and defaults to `and`). For example, a complex query might look like: - -For example, a more complex query might look like: - -```javascript -Table.search({ - conditions: [ - { attribute: 'price', comparator: 'less_than', value: 100 }, - { - operator: 'or', - conditions: [ - { attribute: 'rating', comparator: 'greater_than', value: 4 }, - { attribute: 'featured', value: true }, - ], - }, - ], -}); -``` - -**Chained Attributes/Properties** - -Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../../developers/applications/defining-schemas.md) (in addition to the [schema documentation](../../../developers/applications/defining-schemas.md), see the [REST documentation](../../../developers/rest.md) for more of overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name: - -```javascript -Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] }); -``` - -This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in a referenced joined tables. - -#### `operator` - -Specifies if the conditions should be applied as an `"and"` (records must match all conditions), or as an "or" (records must match at least one condition). This is optional and defaults to `"and"`. - -#### `limit` - -This specifies the limit of the number of records that should be returned from the query. - -#### `offset` - -This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records. - -#### `select` - -This specifies the specific properties that should be included in each record that is returned. This can be an array, to specify a set of properties that should be included in the returned objects. The array can specify an `select.asArray = true` property and the query results will return a set of arrays of values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and `select` array itself that specifies properties that should be returned by the referenced sub-object or related record. 
For example, a `select` can be defined: - -```javascript -Table.search({ select: [ 'name', 'age' ], conditions: ...}) -``` - -Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects: - -```javascript -Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...}) -``` - -The select properties can also include certain special properties: - -- `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table). -- `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time). - -Alternatively, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects: - -```javascript -Table.search({ select: 'id', conditions: ...}) -``` - -#### `sort` - -This defines the sort order, and should be an object that can have the following properties: - -- `attribute`: The attribute to sort on. -- `descending`: If true, will sort in descending order (optional and defaults to `false`). -- `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`. - -#### `explain` - -This will return the conditions re-ordered as Harper will execute them. Harper will estimate the number of matching records for each condition and apply the narrowest condition first. - -#### `enforceExecutionOrder` - -This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them. - -The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index). - -For example, we could do a query like: - -```javascript -let { Product } = tables; -let results = Product.search({ - conditions: [ - { attribute: 'rating', value: 4.5, comparator: 'greater_than' }, - { attribute: 'price', value: 100, comparator: 'less_than' }, - ], - offset: 20, - limit: 10, - select: ['id', 'name', 'price', 'rating'], - sort: { attribute: 'price' }, -}); -for await (let record of results) { - // iterate through each record in the query results -} -``` - -`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will result in a long-lived read transaction, which can degrade performance (including write performance), and may eventually be aborted. - -### `RequestTarget` - -The `RequestTarget` class is used to represent a URL path that can be mapped to a resource. This is used by the REST interface to map a URL path to a resource class. All REST methods are called with a `RequestTarget` as the first argument, which is used to determine which record or entry to access or modify. 
Methods on a `Resource` class can be called with a primary key as a string or number value as the first argument, to access or modify a record by primary key, which will work with all the default methods. The static methods will transform the primary key to a `RequestTarget` instance before calling the instance methods, for argument normalization. The static methods will also automatically parse a URL path (from the REST methods) into a `RequestTarget` instance, including parsing the search string into query parameters. -Below are the properties and methods of the `RequestTarget` class: - -- `pathname` - The path of the URL relative to the resource path that matched this request. This excludes the query/search string. -- `toString()` - The full relative path and search string of the URL. -- `search` - The search/query part of the target path (the part after the first `?` character). -- `id` - The primary key of the resource, as determined by the path. -- `checkPermission` - This property is set to an object indicating that a permission check should be performed on the resource. This is used by the REST interface to determine if a user has permission to access the resource. The object contains: - - `action` - The type of action being performed (read/write/delete) - - `resource` - The resource being accessed - - `user` - The user requesting access - -`RequestTarget` is a subclass of `URLSearchParams`, and these methods are available for accessing and modifying the query parameters: - -- `get(name: string)` - Get the value of the query parameter with the specified name -- `getAll(name: string)` - Get all the values of the query parameter with the specified name -- `set(name: string, value: string)` - Set the value of the query parameter with the specified name -- `append(name: string, value: string)` - Append the value to the query parameter with the specified name -- `delete(name: string)` - Delete the query parameter with the specified name -- `has(name: string)` - Check if the query parameter with the specified name exists - -In addition, the `RequestTarget` class is an iterable, so you can iterate through the query parameters: - -- `for (let [name, value] of target)` - Iterate through the query parameters - -When a `RequestTarget` has query parameters using Harper's extended query syntax, the REST static methods will parse the `RequestTarget` and potentially add any of the following properties if they are present in the query: - -- `conditions` - An array of conditions that will be used to filter the query results -- `limit` - The limit of the number of records to return -- `offset` - The number of records to skip before returning the results -- `sort` - The sort order of the query results -- `select` - The properties to return in the query results - -### `RecordObject` - -The `get` method will return a `RecordObject` instance, which is an object containing all the properties of the record. Any property on the record can be directly accessed and the properties can be enumerated with standard JS capabilities like `for`-`in` and `Object.keys`. The `RecordObject` instance will also have the following methods: - -- `getUpdatedTime()` - Get the last updated time (the version number) of the record -- `getExpiresAt()` - Get the expiration time of the entry, if there is one. 
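For example (a minimal sketch, assuming a `MyTable` table with a `name` attribute):

```javascript
const { MyTable } = tables;
let record = await MyTable.get('some-id');
let name = record.name; // direct property access on the record
let version = record.getUpdatedTime(); // the last updated time (version number) of the record
let expiresAt = record.getExpiresAt(); // the expiration time of the entry, if there is one
```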
- -### Interacting with the Resource Data Model - -When extending or interacting with table resources, you can interact through standard CRUD/REST methods to create, read, update, and delete records. You can use idiomatic property access and modification to interact with the records themselves. For example, let's say we defined a product schema: - -```graphql -type Product @table { - id: ID @primaryKey - name: String - rating: Int - price: Float -} -``` - -If we have extended this table class with our own `get()`, we can interact with the record: - -```javascript -export class CustomProduct extends Product { - async get(target) { - let record = await super.get(target); - let name = record.name; // this is the name of the current product - let rating = record.rating; // this is the rating of the current product - // we can't directly modify the record (it is frozen), but we can copy it if we want to return a modification - record = { ...record, rating: 3 }; - return record; - } -} -``` - -Likewise, we can interact with resource instances in the same way when retrieving them through the static methods: - -```javascript -let product1 = await Product.get(1); -let name = product1.name; // this is the name of the product with a primary key of 1 -let rating = product1.rating; // this is the rating of the product with a primary key of 1 -// if we want to update a single property: -await Product.patch(1, { rating: 3 }); -``` - -When running inside a transaction, we can use the `update` method and updates are automatically saved when a request completes: - -```javascript -export class CustomProduct extends Product { - post(target, data) { - let record = this.update(target); - record.name = data.name; - record.description = data.description; - // both of these changes will be saved automatically as this transaction commits - } -} -``` - -We can also interact with properties in nested objects and arrays, following the same patterns. For example, we could define more complex types on our product: - -```graphql -type Product @table { - id: ID @primaryKey - name: String - rating: Int - price: Float - brand: Brand - variations: [Variation] -} -type Brand { - name: String -} -type Variation { - name: String - price: Float -} -``` - -We can interact with these nested properties: - -```javascript -export class CustomProduct extends Product { - post(target, data) { - let record = this.update(target); - let brandName = record.brand.name; - let firstVariationPrice = record.variations[0].price; - let additionalInfoOnBrand = record.brand.additionalInfo; // not defined in schema, but we can still try to access the property - // make some changes - record.variations.splice(0, 1); // remove the first variation - record.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation - record.brand.name = 'new brand name'; - // all these changes will be saved - } -} -``` - -If you need to delete a property, you can do so with the `delete` method: - -```javascript -let product1 = await Product.update(1); -product1.delete('additionalInformation'); -``` - -## Response Object - -The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response`-style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. 
For example, you could return a redirect response like: - -```javascript -return { status: 302, headers: { Location: '/new-location' } }; -``` - -If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header: - -```javascript -return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } }; -``` - -### Throwing Errors - -You may throw errors (and leave them uncaught) from the response methods and these should be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default, the status code will be 500. You can assign a property of `statusCode` to errors to indicate the HTTP status code that should be returned. For example: - -```javascript -if (notAuthorized()) { - let error = new Error('You are not authorized to access this'); - error.statusCode = 403; - throw error; -} -``` diff --git a/docs/technical-details/reference/resources/index.md b/docs/technical-details/reference/resources/index.md new file mode 100644 index 00000000..6fd0664e --- /dev/null +++ b/docs/technical-details/reference/resources/index.md @@ -0,0 +1,744 @@ +--- +title: Resource Class +--- + +# Resource Class + +## Resource Class + +The Resource class is designed to provide a unified API for modeling different data resources within Harper. Database/table data can be accessed through the Resource API. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users. + +Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, that you can interact with through various methods or queries. Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. A Resource instance holds the primary key/identifier, context information, and any pending updates to the record, so any instance methods can act on the record and have full access to this information during execution. Therefore, there are distinct resource instances created for every record or query that is accessed, and the instance methods are used for interaction with the data. + +Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. The general rule for how to interact with resources is: + +- If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`. +- If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods, as sketched below. 
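For example (a minimal sketch of this distinction; `MyTable` is an assumed table name):

```javascript
// acting upon a table: use the static methods
let record = await tables.MyTable.get(34);

// defining custom behavior: extend the class and override/define instance methods
export class MyTable extends tables.MyTable {
	static loadAsInstance = false;
	async get(target) {
		// customize how this resource responds to reads before delegating to the table
		return super.get(target);
	}
}
```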
+ +The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to and be used in a similar way to how you would interact with a RESTful API. + +The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs that were designed with single-server interactions in mind. Semantics that attempt to guarantee no existing record or overwrite-only behavior require locks that don't scale well in a distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed, eventually consistent database. You can generally think of CRUD operations mapping to REST operations like this: + +- Read - `get` +- Create with a known primary key - `put` +- Create with a generated primary key - `post`/`create` +- Update (Full) - `put` +- Update (Partial) - `patch` +- Delete - `delete` + +The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URL, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped to an instance of `MyResource`, and will call the instance methods like `get(target)`, `put(target, data)`, and `post(target, data)`, where `target` is based on the `/3492` part of the path. + +It is recommended that you use the latest version (V2) of the Resource API with the legacy instance binding behavior disabled. This is done by setting the static `loadAsInstance` property to `false` on the Resource class. This will become the default behavior in Harper version 5.0. This page is written assuming `loadAsInstance` is set to `false`. If you want to use the legacy instance binding behavior, you can set `loadAsInstance` to `true` on the Resource class. If you have existing code that you want to migrate, please see the [migration guide](./resource-migration) for more information. + +You can create classes that extend `Resource` to define your own data sources, typically to interface with external data sources (the `Resource` base class is available as a global variable in the Harper JS environment). In doing this, you will generally be extending and providing implementations for the instance methods below. For example: + +```javascript +export class MyExternalData extends Resource { + static loadAsInstance = false; // enable the updated API + async get(target) { + // fetch data from an external source, using our id + let response = await this.fetch(target.id); + // do something with the response + } + put(target, data) { + // send the data into the external source + } + delete(target) { + // delete an entity in the external data source + } + subscribe(subscription) { + // if the external data source is capable of real-time notification of changes, we can subscribe + } +} +// we can export this class from resources.js as our own endpoint, or use this as the source for +// a Harper table to store and cache the data coming from this data source: +tables.MyCache.sourcedFrom(MyExternalData); +``` + +You can also extend table classes in the same way, overriding the instance methods for custom functionality. 
The `tables` object is a global variable in the Harper JavaScript environment, along with `Resource`: + +```javascript +export class MyTable extends tables.MyTable { + static loadAsInstance = false; // enable the updated API + async get(target) { + // we can add properties or change properties before returning data: + return { ...(await super.get(target)), newProperty: 'newValue', existingProperty: 42 }; // returns the record, with additional properties + } + put(target, data) { + // we can change the data any way we want + super.put(target, data); + } + delete(target) { + super.delete(target); + } + post(target, data) { + // providing a post handler (for HTTP POST requests) is a common way to create additional + // actions that aren't well described with just PUT or DELETE + } +} +``` + +Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice. + +All Resource methods that are called from HTTP methods may directly return data or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code), to explicitly return specific headers and status code. + +## Global Variables + +### `tables` + +This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API. + +### `databases` + +This is an object with all the databases that have been defined in Harper (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true. + +### `Resource` + +This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables. + +### `server` + +This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation](../components/extensions) for more information. + +### `transaction` + +This provides a function for starting transactions. See the transactions section below for more information. + +### `contentTypes` + +This provides an interface for defining new content type handlers. See the content type extensions documentation for more information. + +### TypeScript Support + +While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them. 
This can be done by setting up a package link to the main Harper package in your app: + +```bash +# you may need to go to your harper directory and set it up as a link first +npm link harperdb +``` + +And then you can import any of the main Harper APIs you will use, and your IDE should understand the full typings associated with them: + +```javascript +import { databases, tables, Resource } from 'harperdb'; +``` + +## Resource Class (Instance) Methods + +### Properties/attributes declared in schema + +Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances. + +### `get(target: RequestTarget | Id): Promise|AsyncIterable` + +This retrieves a record, or queries for records, and is called by HTTP GET requests. This can be called with a `RequestTarget`, which can specify a path/id as well as query/search parameters. For tables, this can also be called directly with an id (string or number) to retrieve a record by id. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. HTTP requests will always call `get` with a full `RequestTarget`. The default `get` method (`super.get(target)`) returns the current record as a plain object. + +The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information: + +```javascript +class MyResource extends Resource { + static loadAsInstance = false; + get(target) { + let param1 = target.get('param1'); // returns 'value' + let id = target.id; // returns 'some-id' + let path = target.pathname; // returns '/some-id' + let fullTarget = target.target; // returns '/some-id?param1=value' + ... + } +} +``` + +If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return the record identified by the path. If `get` is called on a collection (`/Table/?name=value`), the target will have the `isCollection` property set to `true` and the default action is to `search` and return an AsyncIterable of results. + +### `search(query: RequestTarget): AsyncIterable` + +This performs a query on this resource or table. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`) it searches through all records in the table. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an `AsyncIterable` of results. The `query` object can be used to specify the desired query. + +### `put(target: RequestTarget | Id, data: object): void|Response` + +This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(target, data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance. + +The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. 
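For example, a `put` override can validate data before it is written (a minimal sketch; the `name` attribute and error message are assumptions for illustration):

```javascript
export class MyTable extends tables.MyTable {
	static loadAsInstance = false;
	put(target, data) {
		if (!data.name) {
			// reject invalid writes; statusCode informs the HTTP error response
			let error = new Error('name is required');
			error.statusCode = 400;
			throw error;
		}
		return super.put(target, data);
	}
}
```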
+ +### `patch(target: RequestTarget | Id, data: object): void|Response` + +This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(target, data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties, and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. + +### `update(target: RequestTarget, updates?: object): Updatable` + +This can be called to get an Updatable class for updating a record. An `Updatable` instance provides direct access to record properties as properties on the `Updatable` instance. The properties can also be modified and any changes are tracked and written to the record when the transaction commits. For example, if we wanted to update the quantity of a product in the Product table, in response to a post, we could write: + +```javascript +class ... { + static loadAsInstance = false; + post(target, data) { + let updatable = this.update(target); + updatable.quantity = updatable.quantity - 1; + } +} +``` + +In addition, the `Updatable` class has the following methods. + +### `Updatable` class + +#### `addTo(property, value)` + +This adds the provided value to the specified property using conflict-free data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values. We could improve the example above to reliably ensure the quantity is decremented even when it occurs in multiple nodes simultaneously: + +```javascript +class ... { + static loadAsInstance = false; + post(target, data) { + let updatable = this.update(target); + updatable.addTo('quantity', -1); + } +} +``` + +#### `subtractFrom(property, value)` + +This functions exactly the same as `addTo`, except it subtracts the value. + +The `Updatable` also inherits the `getUpdatedTime` and `getExpiresAt` methods from the `RecordObject` class. + +### `delete(target: RequestTarget): void|Response` + +This will delete the record or resource identified by the target, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete(target)`) deletes the record identified by the target from the table as part of the current transaction. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. + +### `publish(target: RequestTarget, message): void|Response` + +This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(target, message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic. 
The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. + +### `post(target: RequestTarget, data: object): void|Response` + +This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. + +### `invalidate(target: RequestTarget)` + +This method is available on tables. This will invalidate the specified record in the table. This is typically used with a caching table to indicate that the source data has changed, and the record needs to be reloaded when next accessed. + +### `subscribe(subscriptionRequest: SubscriptionRequest): Promise` + +This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource. + +The returned promise resolves to a Subscription object, which is an `AsyncIterable` that you can iterate through with a `for await` loop. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time, previous count, or there is a message for the current or "retained" record, these may be immediately returned). + +The `SubscriptionRequest` object supports the following properties (all optional): + +- `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`). +- `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription. +- `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`. +- `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update. + +### `connect(target: RequestTarget, incomingMessages?: AsyncIterable): AsyncIterable` + +This is called when a connection is received through WebSockets or Server Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client. 
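For example, a subscription returned from `subscribe` can be consumed with a `for await` loop, much like the message streams used with `connect` (a minimal sketch; the table and topic id are assumptions for illustration):

```javascript
let subscription = await tables.MyTable.subscribe({ id: 'some-topic' });
for await (let event of subscription) {
	// each event is an update or published message for the record/topic
	console.log(event);
}
```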
+ +### `getUpdatedTime(): number` + +This returns the last updated time of the resource (timestamp of last commit). This is returned as milliseconds from epoch. + +### `wasLoadedFromSource(): boolean` + +Indicates if the record was loaded from the source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or had to wait on an inflight request from the source to finish). + +### `getContext(): Context` + +Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action. + +#### `Context` + +The `Context` object has the following (potential) properties: + +- `user` - This is the user object, which includes information about the username, role, and authorizations. +- `transaction` - The current transaction. + +If the current method was triggered by an HTTP request, the following properties are available: + +- `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated. + +When a resource gets a request through HTTP, the request object is the context, which has the following properties: + +- `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string). +- `method` - The method of the HTTP request. +- `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`. +- `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`. +- `pathname` - This provides the path part of the URL (no querystring). +- `host` - This provides the host name of the request (from the `Host` header). +- `ip` - This provides the ip address of the client that made the request. +- `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body. +- `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`). + +When a resource is accessed as a data source: + +- `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and relying on this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information. 
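For example, the context can be used to inspect the requesting user and attach a response header (a minimal sketch; the header name and the `username` property access are assumptions for illustration):

```javascript
export class MyTable extends tables.MyTable {
	static loadAsInstance = false;
	get(target) {
		let context = this.getContext();
		if (context.user && context.responseHeaders) {
			// responseHeaders is available when the request came through HTTP (assumed header name)
			context.responseHeaders.set('X-Served-To', context.user.username);
		}
		return super.get(target);
	}
}
```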
+ +### `operation(operationObject: Object, authorize?: boolean): Promise` + +This method is available on tables and will execute a Harper operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../../developers/operations-api/) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized). + +### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean` + +For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value. + +## Resource Static Methods and Properties + +The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way of interacting with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on. + +The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a `static search()` method for specifically handling searching a table with query parameters. By default, the Resource static methods create an instance bound to the record specified by the arguments, and call the instance methods. Again, static methods are generally the preferred way to interact with resources from application code. These methods are available on all user Resource classes and tables. + +### `get(target: RequestTarget|Id, context?: Resource|Context)` + +This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do: + +```javascript +const { MyTable, Comment } = tables; +... +// in class: + async get() { + for (let commentId of this.commentIds) { + let comment = await Comment.get(commentId, this); + // now you can do something with the comment record + } + } +``` + +Type definition for `Id`: + +```typescript +type Id = string | number | (string | number)[]; +``` + +### `get(query: Query, context?: Resource|Context)` + +This can be used to retrieve a resource instance by a query. The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`: + +```javascript +MyTable.get({ id: 34, select: ['name', 'age'] }); +``` + +This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above. 
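For example (a minimal sketch, assuming a `MyTable` table with an `age` attribute):

```javascript
const { MyTable } = tables;
// a query for a specific id resolves to a single record
let record = await MyTable.get({ id: 34, select: ['name', 'age'] });
// a query without a specific id is delegated to search and returns an AsyncIterable
let results = await MyTable.get({ conditions: [{ attribute: 'age', comparator: 'greater_than', value: 21 }] });
for await (let match of results) {
	// iterate through the matching records
}
```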
+
+### `put(target: RequestTarget|Id, record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same `id` (primary key).
+
+### `put(record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same primary key provided in the record. If your table doesn't have a primary key attribute, you will need to use the method with the `id` argument. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `create(record: object, context?: Resource|Context): Promise`
+
+This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not include a primary key, as the generated primary key will be used. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `post(target: RequestTarget|Id, data: object, context?: Resource|Context): Promise|any`
+
+This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged.
+
+### `patch(target: RequestTarget|Id, recordUpdate: object, context?: Resource|Context): Promise|void`
+
+This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `delete(target: RequestTarget|Id, context?: Resource|Context): Promise|void`
+
+Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `publish(target: RequestTarget|Id, message: object, context?: Resource|Context): Promise|void`
+
+Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise`
+
+Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this.
+
+### `search(query: RequestTarget, context?: Resource|Context): AsyncIterable`
+
+This will perform a query on this table or collection. The query parameter can be used to specify the desired query.
+
+### `setComputedAttribute(name: string, computeFunction: (record: object) => any)`
+
+This will define the function to use for a computed attribute. To use this, the attribute must be defined in the schema as a computed attribute. The `computeFunction` will be called with the record as an argument and should return the computed value for the attribute. For example:
+
+```javascript
+MyTable.setComputedAttribute('computedAttribute', (record) => {
+	return record.attribute1 + record.attribute2;
+});
+```
+
+For a schema like:
+
+```graphql
+type MyTable @table {
+	id: ID @primaryKey
+	attribute1: Int
+	attribute2: Int
+	computedAttribute: Int @computed
+}
+```
+
+See the [schema documentation](../../../developers/applications/defining-schemas) for more information on computed attributes.
+
+### `primaryKey`
+
+This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example:
+
+```javascript
+let record34 = await Table.get(34);
+record34[Table.primaryKey]; // -> 34
+```
+
+There are additional methods that are only available on table classes (which are a type of resource).
+
+### `Table.sourcedFrom(Resource, options)`
+
+This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`) and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries, and supports:
+
+- `expiration` - Default expiration time for records in seconds.
+- `eviction` - Eviction time for records in seconds.
+- `scanInterval` - Time period for scanning the table for records to evict.
+
+If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data).
+
+### `directURLMapping`
+
+This property can be set to force the direct URL request target to be mapped to the resource primary key. Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example:
+
+```javascript
+export class MyTable extends tables.MyTable {
+	static directURLMapping = true;
+}
+```
+
+```http
+GET /MyTable/test?foo=bar
+```
+
+This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource.
+
+### `getRecordCount({ exactCount: boolean })`
+
+This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. If this was not an exact count, it will also include an `estimatedRange` array with the estimated range of the count.
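+
+For example (assuming a table class `MyTable`):
+
+```javascript
+// approximate count (fast), with an estimated range
+let { recordCount, estimatedRange } = await MyTable.getRecordCount();
+// exact count (slower and more expensive)
+let { recordCount: exactCount } = await MyTable.getRecordCount({ exactCount: true });
+```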
+
+### `parsePath(path, context, query)`
+
+This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will parse `.property` suffixes for accessing properties and specifying the preferred content type in the URL (and for older tables it will convert a multi-segment path to a multipart array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simpler path-to-id preservation:
+
+```javascript
+	static parsePath(path) {
+		return path; // return the path as the id
+	}
+```
+
+### `isCollection(resource: Resource): boolean`
+
+This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity.
+
+### Context and Transactions
+
+Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation.
+
+This also allows timestamps that are accessed during resolution to be used to determine the overall last updated timestamp, which informs the header timestamps (which facilitates accurate client-side caching). The context also maintains user, session, and request metadata information that is communicated so that contextual request information (like headers) can be accessed and any writes are properly attributed to the correct user, or any additional security checks can be applied to the user.
+
+When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the second (context) argument to the static methods.
+
+For example, suppose we have a method to post a comment on a blog post; when this happens we also want to update an array of comment IDs on the blog record, and then add the comment to a separate comment table. We might do this:
+
+```javascript
+const { Comment } = tables;
+
+export class BlogPost extends tables.BlogPost {
+	post(comment) {
+		// add a comment record to the comment table, using this resource as the source for the context
+		Comment.put(comment, this);
+		this.comments.push(comment.id); // add the id for the record to our array of comment ids
+		// Both of these actions will be committed atomically as part of the same transaction
+	}
+}
+```
+
+Please see the [transaction documentation](../transactions) for more information on how transactions work in Harper.
+
+### Query
+
+The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional:
+
+#### `conditions`
+
+This is an array of objects that specify the conditions to use to match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties:
+
+- `attribute`: Name of the property/attribute to match on.
+- `value`: The value to match.
+- `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater_than", "greater_than_equal", "less_than", "less_than_equal", "starts_with", "contains", "ends_with", "between", and "not_equal".
+- `conditions`: An array of conditions, which follows the same structure as above.
+- `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`); this is optional and defaults to `and`.
+
+For example, a more complex query might look like:
+
+```javascript
+Table.search({
+	conditions: [
+		{ attribute: 'price', comparator: 'less_than', value: 100 },
+		{
+			operator: 'or',
+			conditions: [
+				{ attribute: 'rating', comparator: 'greater_than', value: 4 },
+				{ attribute: 'featured', value: true },
+			],
+		},
+	],
+});
+```
+
+**Chained Attributes/Properties**
+
+Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../../developers/applications/defining-schemas), see the [REST documentation](../../../developers/rest) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:
+
+```javascript
+Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] });
+```
+
+This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as in nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in a referenced/joined table.
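+
+For example, a sketch of sorting by a chained attribute, reusing the `brand` relationship above (the `featured` condition is illustrative):
+
+```javascript
+// sort products by the name of their referenced brand
+Product.search({
+	conditions: [{ attribute: 'featured', value: true }],
+	sort: { attribute: ['brand', 'name'] },
+});
+```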
+
+#### `operator`
+
+Specifies if the conditions should be applied as an `"and"` (records must match all conditions) or as an `"or"` (records must match at least one condition). This is optional and defaults to `"and"`.
+
+#### `limit`
+
+This specifies the limit of the number of records that should be returned from the query.
+
+#### `offset`
+
+This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records.
+
+#### `select`
+
+This specifies the specific properties that should be included in each record that is returned. This can be an array to specify a set of properties that should be included in the returned objects. The query can set a `select.asArray = true` property, in which case the results will return a set of arrays of the values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and a `select` array itself that specifies properties that should be returned by the referenced sub-object or related record. For example, a `select` can be defined:
+
+```javascript
+Table.search({ select: [ 'name', 'age' ], conditions: ...})
+```
+
+Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects:
+
+```javascript
+Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...})
+```
+
+The select properties can also include certain special properties:
+
+- `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
+- `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).
+
+Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects:
+
+```javascript
+Table.search({ select: 'id', conditions: ...})
+```
+
+#### `sort`
+
+This defines the sort order, and should be an object that can have the following properties:
+
+- `attribute`: The attribute to sort on.
+- `descending`: If true, will sort in descending order (optional and defaults to `false`).
+- `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
+
+#### `explain`
+
+This will return the conditions re-ordered as Harper will execute them. Harper will estimate the number of matching records for each condition and apply the narrowest condition first.
+
+#### `enforceExecutionOrder`
+
+This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them.
+
+The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index).
+
+For example, we could do a query like:
+
+```javascript
+let { Product } = tables;
+let results = Product.search({
+	conditions: [
+		{ attribute: 'rating', value: 4.5, comparator: 'greater_than' },
+		{ attribute: 'price', value: 100, comparator: 'less_than' },
+	],
+	offset: 20,
+	limit: 10,
+	select: ['id', 'name', 'price', 'rating'],
+	sort: { attribute: 'price' },
+});
+for await (let record of results) {
+	// iterate through each record in the query results
+}
+```
+
+`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will leave a long-lived read transaction open, which can degrade performance (including write performance), and the transaction may eventually be aborted.
+
+### `RequestTarget`
+
+The `RequestTarget` class is used to represent a URL path that can be mapped to a resource. This is used by the REST interface to map a URL path to a resource class. All REST methods are called with a `RequestTarget` as the first argument, which is used to determine which record or entry to access or modify. Methods on a `Resource` class can be called with a primary key as a string or number value as the first argument, to access or modify a record by primary key, which will work with all the default methods. The static methods will transform the primary key to a `RequestTarget` instance to call the instance methods, for argument normalization.
+When a `RequestTarget` is constructed with a URL path (as with the REST methods), the static methods will automatically parse the path into a `RequestTarget` instance, including parsing the search string into query parameters.
+Below are the properties and methods of the `RequestTarget` class:
+
+- `pathname` - The path of the URL relative to the resource path that matched this request. This excludes the query/search string.
+- `toString()` - The full relative path and search string of the URL.
+- `search` - The search/query part of the target path (the part after the first `?` character).
+- `id` - The primary key of the resource, as determined by the path.
+- `checkPermission` - This property is set to an object indicating that a permission check should be performed on the
+  resource. This is used by the REST interface to determine if a user has permission to access the resource. The object
+  contains:
+  - `action` - The type of action being performed (read/write/delete)
+  - `resource` - The resource being accessed
+  - `user` - The user requesting access
+
+`RequestTarget` is a subclass of `URLSearchParams`, and these methods are available for accessing and modifying the query parameters:
+
+- `get(name: string)` - Get the value of the query parameter with the specified name
+- `getAll(name: string)` - Get all the values of the query parameter with the specified name
+- `set(name: string, value: string)` - Set the value of the query parameter with the specified name
+- `append(name: string, value: string)` - Append the value to the query parameter with the specified name
+- `delete(name: string)` - Delete the query parameter with the specified name
+- `has(name: string)` - Check if the query parameter with the specified name exists
+
+In addition, the `RequestTarget` class is an iterable, so you can iterate through the query parameters:
+
+- `for (let [name, value] of target)` - Iterate through the query parameters
+
+When a `RequestTarget` has query parameters using Harper's extended query syntax, the REST static methods will parse the `RequestTarget` and potentially add any of the following properties if they are present in the query:
+
+- `conditions` - An array of conditions that will be used to filter the query results
+- `limit` - The limit of the number of records to return
+- `offset` - The number of records to skip before returning the results
+- `sort` - The sort order of the query results
+- `select` - The properties to return in the query results
+
+### `RecordObject`
+
+The `get` method will return a `RecordObject` instance, which is an object containing all the properties of the record. Any property on the record can be directly accessed, and the properties can be enumerated with standard JS capabilities like `for`-`in` and `Object.keys`. The `RecordObject` instance will also have the following methods:
+
+- `getUpdatedTime()` - Get the last updated time (the version number) of the record
+- `getExpiresAt()` - Get the expiration time of the entry, if there is one.
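+
+As a brief sketch of reading this metadata off a retrieved record (using a hypothetical `Product` table):
+
+```javascript
+let record = await Product.get(1);
+let version = record.getUpdatedTime(); // last updated time / version of the record
+let expiresAt = record.getExpiresAt(); // expiration time, if the table has one configured
+for (let key in record) {
+	// enumerate the record's properties with standard JS iteration
+}
+```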
+
+### Interacting with the Resource Data Model
+
+When extending or interacting with table resources, you can interact through standard CRUD/REST methods to create, read, update, and delete records. You can use idiomatic property access and modification to interact with the records themselves. For example, let's say we defined a product schema:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+}
+```
+
+If we have extended this table class with our own `get()` we can interact with the record:
+
+```javascript
+export class CustomProduct extends Product {
+	async get(target) {
+		let record = await super.get(target);
+		let name = record.name; // this is the name of the current product
+		let rating = record.rating; // this is the rating of the current product
+		// we can't directly modify the record (it is frozen), but we can copy it if we want to return a modification
+		record = { ...record, rating: 3 };
+		return record;
+	}
+}
+```
+
+Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:
+
+```javascript
+let product1 = await Product.get(1);
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+// if we want to update a single property:
+await Product.patch(1, { rating: 3 });
+```
+
+When running inside a transaction, we can use the `update` method, and updates are automatically saved when a request completes:
+
+```javascript
+export class CustomProduct extends Product {
+	post(target, data) {
+		let record = this.update(target);
+		record.name = data.name;
+		record.description = data.description;
+		// both of these changes will be saved automatically as this transaction commits
+	}
+}
+```
+
+We can also interact with properties in nested objects and arrays, following the same patterns. For example, we could define more complex types on our product:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+	brand: Brand
+	variations: [Variation]
+}
+type Brand {
+	name: String
+}
+type Variation {
+	name: String
+	price: Float
+}
+```
+
+We can interact with these nested properties:
+
+```javascript
+export class CustomProduct extends Product {
+	post(target, data) {
+		let record = this.update(target);
+		let brandName = record.brand.name;
+		let firstVariationPrice = record.variations[0].price;
+		let additionalInfoOnBrand = record.brand.additionalInfo; // not defined in schema, but we can still try to access the property
+		// make some changes
+		record.variations.splice(0, 1); // remove first variation
+		record.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		record.brand.name = 'new brand name';
+		// all these changes will be saved
+	}
+}
+```
+
+If you need to delete a property, you can do so with the `delete` method:
+
+```javascript
+let product1 = await Product.update(1);
+product1.delete('additionalInformation');
+```
+
+## Response Object
+
+The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response`-style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. For example, you could return a redirect response like:
+
+```javascript
+return { status: 302, headers: { Location: '/new-location' } };
+```
+
+If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header:
+
+```javascript
+return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
+```
+
+### Throwing Errors
+
+You may throw errors (and leave them uncaught) from the response methods, and these will be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned. For example:
+
+```javascript
+if (notAuthorized()) {
+	let error = new Error('You are not authorized to access this');
+	error.statusCode = 403;
+	throw error;
+}
+```
diff --git a/docs/technical-details/reference/resources/instance-binding.md b/docs/technical-details/reference/resources/instance-binding.md
index 6eba8ea0..7c701976 100644
--- a/docs/technical-details/reference/resources/instance-binding.md
+++ b/docs/technical-details/reference/resources/instance-binding.md
@@ -1,6 +1,10 @@
+---
+title: Resource Class with Resource Instance Binding behavior
+---
+
 # Resource Class with Resource Instance Binding behavior
 
-This document describes the legacy instance binding behavior of the Resource class. It is recommended that you use the [updated behavior of the Resource API](./README.md) instead, but this legacy API is preserved for backwards compatibility.
+This document describes the legacy instance binding behavior of the Resource class. It is recommended that you use the [updated behavior of the Resource API](./) instead, but this legacy API is preserved for backwards compatibility.
 ## Resource Class
@@ -8,22 +12,22 @@ This document describes the legacy instance binding behavior of the Resource cla
 export class MyExternalData extends Resource {
 	static loadAsInstance = true;
 	async get() {
-		// fetch data from an external source, using our id
+		// fetch data from an external source, using our id
 		let response = await this.fetch(this.id);
-		// do something with the response
+		// do something with the response
 	}
 	put(data) {
-		// send the data into the external source
+		// send the data into the external source
 	}
 	delete() {
-		// delete an entity in the external data source
+		// delete an entity in the external data source
 	}
 	subscribe(options) {
-		// if the external data source is capable of real-time notification of changes, can subscribe
+		// if the external data source is capable of real-time notification of changes, can subscribe
 	}
 }
-// we can export this class from resources.json as our own endpoint, or use this as the source for
-// a Harper data to store and cache the data coming from this data source:
+// we can export this class from resources.json as our own endpoint, or use this as the source for
+// a Harper data to store and cache the data coming from this data source:
 tables.MyCache.sourcedFrom(MyExternalData);
 ```
@@ -32,28 +36,28 @@ You can also extend table classes in the same way, overriding the instance metho
 ```javascript
 export class MyTable extends tables.MyTable {
 	get() {
-		// we can add properties or change properties before returning data:
+		// we can add properties or change properties before returning data:
 		this.newProperty = 'newValue';
 		this.existingProperty = 44;
-		return super.get(); // returns the record, modified with the changes above
+		return super.get(); // returns the record, modified with the changes above
 	}
 	put(data) {
-		// can change data any way we want
+		// can change data any way we want
 		super.put(data);
 	}
 	delete() {
 		super.delete();
 	}
 	post(data) {
-		// providing a post handler (for HTTP POST requests) is a common way to create additional
-		// actions that aren't well described with just PUT or DELETE
+		// providing a post handler (for HTTP POST requests) is a common way to create additional
+		// actions that aren't well described with just PUT or DELETE
 	}
 }
 ```
 
 Make sure that if are extending and `export`ing your table with this class, that you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice.
 
-All Resource methods that are called from HTTP methods may directly return data or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code), to explicitly return specific headers and status code.
+All Resource methods that are called from HTTP methods may directly return data or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code), to explicitly return specific headers and status code.
 
 ## Global Variables
 
@@ -71,7 +75,7 @@ This is the Resource base class. This can be directly extended for custom resour
 ### `server`
 
-This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../components/extensions.md).
+This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../components/extensions).
 
 ### `transaction`
 
@@ -79,7 +83,7 @@ This provides a function for starting transactions. See the transactions section
 ### `contentTypes`
 
-This provides an interface for defining new content type handlers. See the [content type extensions documentation](content-types.md) for more information.
+This provides an interface for defining new content type handlers. See the content type extensions documentation for more information.
 
 ### TypeScript Support
 
@@ -110,9 +114,9 @@ The query object can be used to access any query parameters that were included i
 ```javascript
 	get(query) {
-		// note that query will only exist (as an object) if there is a query string
-		let param1 = query?.get?.('param1'); // returns 'value'
-		let id = this.getId(); // returns 'some-id'
+		// note that query will only exist (as an object) if there is a query string
+		let param1 = query?.get?.('param1'); // returns 'value'
+		let id = this.getId(); // returns 'some-id'
 		...
 	}
 ```
@@ -139,7 +143,7 @@ The `query` argument is used to represent any additional query parameters that w
 ```javascript
 	put(data, query) {
-		let param1 = query?.get?.('param1'); // returns 'value'
+		let param1 = query?.get?.('param1'); // returns 'value'
 		...
 	}
 ```
@@ -268,11 +272,11 @@ This will retrieve a resource instance by id. For example, if you want to retrie
 ```javascript
 const { MyTable, Comment } = tables;
 ...
-// in class:
+// in class:
 	async get() {
 		for (let commentId of this.commentIds) {
 			let comment = await Comment.get(commentId, this);
-			// now you can do something with the comment record
+			// now you can do something with the comment record
 		}
 	}
 ```
@@ -356,7 +360,7 @@ type MyTable @table {
 }
 ```
 
-See the [schema documentation](../../developers/applications/defining-schemas.md) for more information on computed attributes.
+See the [schema documentation](../../../developers/applications/defining-schemas) for more information on computed attributes.
 
 ### `primaryKey`
 
@@ -405,13 +409,13 @@ This will return the number of records in the table. By default, this will retur
 This is called by static methods when they are responding to a URL (from HTTP request, for example), and translates the path to an id. By default, this will parse `.property` suffixes for accessing properties and specifying preferred content type in the URL (and for older tables it will convert a multi-segment path to multipart an array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simpler path to id preservation:
 
-```javascript
+```javascript
 	static parsePath(path) {
-		return path; // return the path as the id
+		return path; // return the path as the id
 	}
-```
+```
 
-### getRecordCount: Promise<{}
+### `getRecordCount: Promise<{}>`
 
@@ -427,20 +431,20 @@ When using an export resource class, the REST interface will automatically creat
 For example, if we had a method to post a comment on a blog, and when this happens we also want to update an array of comment IDs on the blog record, but then add the comment to a separate comment table. We might do this:
 
-```javascript
+```javascript
 const { Comment } = tables;
 
 export class BlogPost extends tables.BlogPost {
 	post(comment) {
-		// add a comment record to the comment table, using this resource as the source for the context
+		// add a comment record to the comment table, using this resource as the source for the context
 		Comment.put(comment, this);
-		this.comments.push(comment.id); // add the id for the record to our array of comment ids
-		// Both of these actions will be committed atomically as part of the same transaction
+		this.comments.push(comment.id); // add the id for the record to our array of comment ids
+		// Both of these actions will be committed atomically as part of the same transaction
 	}
 }
 ```
 
-Please see the [transaction documentation](transactions.md) for more information on how transactions work in Harper.
+Please see the [transaction documentation](../transactions) for more information on how transactions work in Harper.
 
 ### Query
 
@@ -475,7 +479,7 @@ Table.search({
 **Chained Attributes/Properties**
 
-Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../developers/applications/defining-schemas.md) (in addition to the [schema documentation](../../developers/applications/defining-schemas.md), see the [REST documentation](../../developers/rest.md) for more of overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:
+Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../../developers/applications/defining-schemas), see the [REST documentation](../../../developers/rest) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:
 
 ```javascript
 Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] });
@@ -506,7 +510,7 @@ Table.search({ select: [ 'name', 'age' ], conditions: ...})
 Or nested/joined properties from referenced objects can be specified, here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects:
 
 ```javascript
-Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...})
+Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...})
 ```
@@ -553,7 +557,7 @@ let results = Product.search({
 	sort: { attribute: 'price' },
 });
 for await (let record of results) {
-	// iterate through each record in the query results
+	// iterate through each record in the query results
 }
 ```
@@ -577,10 +581,10 @@ If we have extended this table class with our get() we can interact with any the
 ```javascript
 export class CustomProduct extends Product {
 	get(query) {
-		let name = this.name; // this is the name of the current product
-		let rating = this.rating; // this is the rating of the current product
-		this.rating = 3; // we can also modify the rating for the current instance
-		// (with a get this won't be saved by default, but will be used when serialized)
+		let name = this.name; // this is the name of the current product
+		let rating = this.rating; // this is the rating of the current product
+		this.rating = 3; // we can also modify the rating for the current instance
+		// (with a get this won't be saved by default, but will be used when serialized)
 		return super.get(query);
 	}
 }
@@ -590,17 +594,17 @@ Likewise, we can interact with resource instances in the same way when retrievin
 ```javascript
 let product1 = await Product.get(1);
-let name = product1.name; // this is the name of the product with a primary key of 1
-let rating = product1.rating; // this is the rating of the product with a primary key of 1
-product1.rating = 3; // modify the rating for this instance (this will be saved without a call to update())
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+product1.rating = 3; // modify the rating for this instance (this will be saved without a call to update())
 ```
 
 If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definition for direct property access, but we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method:
 
 ```javascript
 let product1 = await Product.get(1);
-let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
-product1.set('newProperty', 'some value'); // we can assign any properties we want with set
+let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+product1.set('newProperty', 'some value'); // we can assign any properties we want with set
 ```
 
 And likewise, we can do this in an instance method, although you will probably want to use super.get()/set() so you don't have to write extra logic to avoid recursion:
@@ -608,8 +612,8 @@ And likewise, we can do this in an instance method, although you will probably w
 ```javascript
 export class CustomProduct extends Product {
 	get(query) {
-		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
-		super.set('newProperty', 'some value'); // we can assign any properties we want with set
+		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+		super.set('newProperty', 'some value'); // we can assign any properties we want with set
 	}
 }
 ```
@@ -622,7 +626,7 @@ If you want to save the changes you make, you can call the `update()` method
 let product1 = await Product.get(1);
 product1.rating = 3;
 product1.set('newProperty', 'some value');
-product1.update(); // save both of these property changes
+product1.update(); // save both of these property changes
 ```
 
 Updates are automatically saved inside modifying methods like put and post:
@@ -632,7 +636,7 @@ export class CustomProduct extends Product {
 	post(data) {
 		this.name = data.name;
 		this.set('description', data.description);
-		// both of these changes will be saved automatically as this transaction commits
+		// both of these changes will be saved automatically as this transaction commits
 	}
 }
 ```
@@ -664,12 +668,12 @@ export class CustomProduct extends Product {
 	post(data) {
 		let brandName = this.brand.name;
 		let firstVariationPrice = this.variations[0].price;
-		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but can still try to access property
-		// make some changes
-		this.variations.splice(0, 1); // remove first variation
-		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but can still try to access property
+		// make some changes
+		this.variations.splice(0, 1); // remove first variation
+		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
 		this.brand.name = 'new brand name';
-		// all these change will be saved
+		// all these changes will be saved
 	}
 }
 ```
@@ -688,7 +692,7 @@ You can also get "plain" object representation of a resource instance by calling
 let product1 = await Product.get(1);
 let plainObject = product1.toJSON();
 for (let key in plainObject) {
-	// can iterate through the properties of this record
+	// can iterate through the properties of this record
 }
 ```
 
@@ -697,13 +701,13 @@ for (let key in plainObject) {
 The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response` style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. For example, you could return a redirect response like:
 
 ```javascript
-return { status: 302, headers: { Location: '/new-location' } };
+return { status: 302, headers: { Location: '/new-location' } };
 ```
 
 If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header:
 
 ```javascript
-return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
+return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
 ```
 
 ### Throwing Errors
diff --git a/docs/technical-details/reference/resources/migration.md b/docs/technical-details/reference/resources/migration.md
index c7d66aa1..6a6899f0 100644
--- a/docs/technical-details/reference/resources/migration.md
+++ b/docs/technical-details/reference/resources/migration.md
@@ -1,6 +1,10 @@
+---
+title: Migration to Resource API version 2 (non-instance binding)
+---
+
 # Migration to Resource API version 2 (non-instance binding)
 
-The Resource API was inspired by two major design ideas: the REST architectural design and the [Active Record pattern](https://en.wikipedia.org/wiki/Active_record_pattern) (made popular by Ruby on Rails and heavily used as a pattern in many ORMs). The basic design goal of the Resource API is to integrate these concepts into a single construct that can directly map RESTful methods (specifically the "uniform interface" of HTTP) to an active record data model. However, while the active record pattern has been for _consumption_ of data, implementing methods for endpoint definitions and caching sources as a data _provider_ can be confusing and cumbersome to implement. The updated non-instance binding Resource API is designed to make it easier and more consistent to implement a data provider and interact with records across a table, while maintaining more explicit control over what data is loaded and when.
+The Resource API was inspired by two major design ideas: the REST architectural design and the [Active Record pattern](https://en.wikipedia.org/wiki/Active_record_pattern) (made popular by Ruby on Rails and heavily used as a pattern in many ORMs). The basic design goal of the Resource API is to integrate these concepts into a single construct that can directly map RESTful methods (specifically the "uniform interface" of HTTP) to an active record data model. However, while the active record pattern has worked well for _consumption_ of data, implementing methods for endpoint definitions and caching sources as a data _provider_ can be confusing and cumbersome. The updated non-instance binding Resource API is designed to make it easier and more consistent to implement a data provider and interact with records across a table, while maintaining more explicit control over what data is loaded and when.
 
 The updated Resource API is enabled on a per-class basis by setting static `loadAsInstance` property to `false`. When this property is set to `false`, this means that the Resource instances will not be bound to a specific record. Instead instances represent the whole table, capturing the context and current transactional state. Any records in the table can be loaded or modified from `this` instance.
 
 There are a number of implications and different behaviors from a Resource class with `static loadAsInstance = false`:
@@ -23,15 +27,15 @@ Previous code with a `get` method:
 ```javascript
 export class MyData extends tables.MyData {
 	async get(query) {
-		let id = this.getId(); // get the id
+		let id = this.getId(); // get the id
 		if (query?.size > 0) {
-			// check number of query parameters
-			let idWithQuery = id + query.toString(); // add query parameters
-			let resource = await tables.MyData.get(idWithQuery, this); // retrieve another record
-			resource.newProperty = 'value'; // assign a new value to the returned resource instance
+			// check number of query parameters
+			let idWithQuery = id + query.toString(); // add query parameters
+			let resource = await tables.MyData.get(idWithQuery, this); // retrieve another record
+			resource.newProperty = 'value'; // assign a new value to the returned resource instance
 			return resource;
 		} else {
-			this.newProperty = 'value'; // assign a new value to this instance
+			this.newProperty = 'value'; // assign a new value to this instance
 			return super.get(query);
 		}
 	}
@@ -42,19 +46,19 @@ Updated code:
 ```javascript
 export class MyData extends tables.MyData {
-	static loadAsInstance = false; // opt in to updated behavior
+	static loadAsInstance = false; // opt in to updated behavior
 	async get(target) {
-		let id = target.id; // get the id
+		let id = target.id; // get the id
 		let record;
 		if (target.size > 0) {
-			// check number of query parameters
-			let idWithQuery = target.toString(); // this is the full target with the path query parameters
-			// we can retrieve another record from this table directly with this.get/super.get or with tables.MyData.get
+			// check number of query parameters
+			let idWithQuery = target.toString(); // this is the full target with the path query parameters
+			// we can retrieve another record from this table directly with this.get/super.get or with tables.MyData.get
 			record = await super.get(idWithQuery);
 		} else {
-			record = await super.get(target); // we can just directly use the target as well
+			record = await super.get(target); // we can just directly use the target as well
 		}
-		// the record itself is frozen, but we can copy/assign to a new object with additional properties if we want
+		// the record itself is frozen, but we can copy/assign to a new object with additional properties if we want
 		return { ...record, newProperty: 'value' };
 	}
 }
@@ -66,11 +70,11 @@ Previous code with a `get` method:
 ```javascript
 export class MyData extends tables.MyData {
 	allowRead(user) {
-		// allow any authenticated user
+		// allow any authenticated user
 		return user ? true : false;
 	}
 	async get(query) {
-		// any get logic
+		// any get logic
 		return super.get(query);
 	}
 }
@@ -78,14 +82,14 @@ export class MyData extends tables.MyData {
 ```javascript
 export class MyData extends tables.MyData {
-	static loadAsInstance = false; // opt in to updated behavior
+	static loadAsInstance = false; // opt in to updated behavior
 	async get(target) {
-		// While you can still use allowRead, it is not called before get is called, and it is generally encouraged
-		// to perform/call authorization explicitly in direct get, put, post methods rather than using allow* methods.
+		// While you can still use allowRead, it is not called before get is called, and it is generally encouraged
+		// to perform/call authorization explicitly in direct get, put, post methods rather than using allow* methods.
 		if (!this.getContext().user) throw new Error('Unauthorized');
-		target.checkPermissions = false; // authorization complete, no need to further check permissions below
-		// target.checkPermissions is set to true or left in place, this default get method will perform the default permissions checks
-		return super.get(target); // we can just directly use the query as well
+		target.checkPermissions = false; // authorization complete, no need to further check permissions below
+		// if target.checkPermissions is set to true or left in place, this default get method will perform the default permissions checks
+		return super.get(target); // we can just directly use the query as well
 	}
 }
 ```
@@ -98,12 +102,12 @@ export class MyData extends tables.MyData {
 	async post(data, query) {
 		let resource = await tables.MyData.get(data.id, this);
 		if (resource) {
-			// update a property
+			// update a property
 			resource.someProperty = 'value';
-			// or
+			// or
 			tables.MyData.patch(data.id, { someProperty: 'value' }, this);
 		} else {
-			// create a new record
+			// create a new record
 			MyData.create(data, this);
 		}
 	}
@@ -114,18 +118,18 @@ Updated code:
 ```javascript
 export class MyData extends tables.MyData {
-	static loadAsInstance = false; // opt in to updated behavior
-	// IMPORTANT: arguments are reversed:
+	static loadAsInstance = false; // opt in to updated behavior
+	// IMPORTANT: arguments are reversed:
 	async post(target, data) {
 		let record = await this.get(data.id);
 		if (record) {
-			// update a property
-			const updatable = await this.update(data.id); // we can alternately pass a target to update
+			// update a property
+			const updatable = await this.update(data.id); // we can alternately pass a target to update
 			updatable.someProperty = 'value';
-			// or
+			// or
 			this.patch(data.id, { someProperty: 'value' });
 		} else {
-			// create a new record
+			// create a new record
 			this.create(data);
 		}
 	}
diff --git a/docs/technical-details/reference/storage-algorithm.md b/docs/technical-details/reference/storage-algorithm.md
index ff6c638d..2eccdcfa 100644
--- a/docs/technical-details/reference/storage-algorithm.md
+++ b/docs/technical-details/reference/storage-algorithm.md
@@ -1,6 +1,10 @@
+---
+title: Storage Algorithm
+---
+
 # Storage Algorithm
 
-The Harper storage algorithm is fundamental to the Harper core functionality, enabling the [Dynamic Schema](dynamic-schema.md) and all other user-facing functionality. Harper is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry leading performance and functionality, which allows for our storage algorithm to store data in tables as rows/objects. This document will provide additional details on how data is stored within Harper.
+The Harper storage algorithm is fundamental to the Harper core functionality, enabling the [Dynamic Schema](dynamic-schema) and all other user-facing functionality. Harper is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document will provide additional details on how data is stored within Harper.
 
 ## Query Language Agnostic
 
@@ -12,7 +16,7 @@ Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, Harper offers A
 ## Universally Indexed
 
-All top level attributes are automatically indexed immediately upon ingestion. The [Harper Dynamic Schema](dynamic-schema.md) reflexively creates both the attribute and index reflexively as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
+All top level attributes are automatically indexed immediately upon ingestion. The [Harper Dynamic Schema](dynamic-schema) reflexively creates both the attribute and the index as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
 
 ## Additional LMDB Benefits
 
@@ -20,4 +24,4 @@ Harper inherits both functional and performance benefits by implementing LMDB as
 ## Harper Indexing Example (Single Table)
 
-![](../../../images/reference/HarperDB-3.0-Storage-Algorithm.png.webp)
+![](/reference/HarperDB-3.0-Storage-Algorithm.png.webp)
diff --git a/docs/technical-details/reference/transactions.md b/docs/technical-details/reference/transactions.md
index 313fe17f..11a8f4dc 100644
--- a/docs/technical-details/reference/transactions.md
+++ b/docs/technical-details/reference/transactions.md
@@ -1,3 +1,7 @@
+---
+title: Transactions
+---
+
 # Transactions
 
 Transactions are an important part of robust handling of data in data-driven applications. Harper provides ACID-compliant support for transactions, allowing for guaranteed atomic, consistent, and isolated data handling within transactions, with durability guarantees on commit. Understanding how transactions are tracked and behave is important for properly leveraging transactional support in Harper. For most operations this is very intuitive, each HTTP request is executed in a transaction, so when multiple actions are executed in a single request, they are normally automatically included in the same transaction.
@@ -15,7 +19,7 @@ This executes the callback in a transaction, providing a context that can be use
 ```javascript
 import { tables } from 'harperdb';
 const { MyTable } = tables;
-if (isMainThread) // only on main thread
+if (isMainThread) // only on main thread
 	setInterval(async () => {
 		let someData = await (await fetch(... some URL ...)).json();
 		transaction((txn) => {
@@ -23,7 +27,7 @@ if (isMainThread) // only on main thread
 			MyTable.put(item, txn);
 		}
 	});
-	}, 3600000); // every hour
+	}, 3600000); // every hour
 
 You can provide your own context object for the transaction to attach to. If you call `transaction` with a context that already has a transaction started, it will simply use the current transaction, execute the callback and immediately return (this can be useful for ensuring that a transaction has started).
diff --git a/docs/technical-details/release-notes/1.alby/1.1.0.md b/docs/technical-details/release-notes/1.alby/1.1.0.md
deleted file mode 100644
index d3b6a431..00000000
--- a/docs/technical-details/release-notes/1.alby/1.1.0.md
+++ /dev/null
@@ -1,67 +0,0 @@
-### HarperDB 1.1.0, Alby Release
-
-4/18/2018
-
-**Features**
-
-- Users & Roles:
-  - Limit/Assign access to all HarperDB operations
-
-  - Limit/Assign access to schemas, tables & attributes
-
-  - Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`)
-
-- Enhanced SQL parser
-  - Added extensive ANSI SQL Support.
-  - Added Array function, which allows for converting relational data into Object/Hierarchical data
-  - `Distinct_Array` Function: allows for removing duplicates in the Array function.
-  - Enhanced SQL Validation: Improved validation around structure of SQL, validating the schema, etc..
-  - 10x performance improvement on SQL statements.
-
-- Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON.
-
-- Added upgrade function to CLI
-
-- Added ability to perform bulk update from CSV
-
-- Created landing page for HarperDB.
-
-- Added CORS support to HarperDB
-
-**Fixes**
-
-- Fixed memory leak in CSV bulk loads
-
-- Corrected error when attempting to perform a `SQL DELETE`
-
-- Added further validation to NoSQL `UPDATE` to validate schema & table exist
-
-- Fixed install issue occurring when part of the install path does not exist, the install would silently fail.
-
-- Fixed issues with replicated data when one of the replicas is down
-
-- Removed logging of initial user’s credentials during install
-
-- Can now use reserved words as aliases in SQL
-
-- Removed user(s) password in results when calling `list_users`
-
-- Corrected forwarding of operations to other nodes in a cluster
-
-- Corrected lag in schema meta-data passing to other nodes in a cluster
-
-- Drop table & schema now move the table & schema or table to the trash folder under the Database folder for later permanent deletion.
-
-- Bulk inserts no longer halt the entire operation if n records already exist, instead the return includes the hashes of records that have been skipped.
-
-- Added ability to accept EULA from command line
-
-- Corrected `search_by_value` not searching on the correct attribute
-
-- Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js
-
-- Add error handling resulting from SQL calculations.
-
-- Standardized error responses as JSON.
-
-- Corrected internal process generation to not allow more processes than machine has cores.
diff --git a/docs/technical-details/release-notes/1.alby/1.2.0.md b/docs/technical-details/release-notes/1.alby/1.2.0.md
deleted file mode 100644
index 259890cd..00000000
--- a/docs/technical-details/release-notes/1.alby/1.2.0.md
+++ /dev/null
@@ -1,37 +0,0 @@
-### HarperDB 1.2.0, Alby Release
-
-7/10/2018
-
-**Features**
-
-- Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time.
-- Geo: HarperDB has implemented turf.js into its SQL parser to enable geo based analytics.
-- Jobs: CSV Data loads, Exports & Time to Live now all run as back ground jobs.
-- Exports: Perform queries that export into JSON or CSV and save to disk or S3.
-
-**Fixes**
-
-- Fixed issue where CSV data loads incorrectly report number of records loaded.
-- Added validation to stop `BETWEEN` operations in SQL.
-- Updated logging to not include internal variables in the logs. -- Cleaned up `add_role` response to not include internal variables. -- Removed old and unused dependencies. -- Build out further unit tests and integration tests. -- Fixed https to handle certificates properly. -- Improved stability of clustering & replication. -- Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response. -- Fixed issue where Blob text was not being returned from `SQL SELECT`s. -- Fixed error being returned when querying on table with no data, now correctly returns empty array. -- Improved performance in SQL when searching on exact values. -- Fixed error when ./harperdb stop is called. -- Fixed logging issue causing instability in installer. -- Fixed `read_log` operation to accept date time. -- Added permissions checking to `export_to_s3`. -- Added ability to run SQL on `SELECT` without a `FROM`. -- Fixed issue where updating a user’s password was not encrypting properly. -- Fixed `user_guide.html` to point to readme on git repo. -- Created option to have HarperDB run as a foreground process. -- Updated `user_info` to return the correct role for a user. -- Fixed issue where HarperDB would not stop if the database root was deleted. -- Corrected error message on insert if an invalid schema is provided. -- Added permissions checks for user & role operations. diff --git a/docs/technical-details/release-notes/1.alby/1.3.0.md b/docs/technical-details/release-notes/1.alby/1.3.0.md deleted file mode 100644 index 30043b32..00000000 --- a/docs/technical-details/release-notes/1.alby/1.3.0.md +++ /dev/null @@ -1,22 +0,0 @@ -### HarperDB 1.3.0, Alby Release - -11/2/2018 - -**Features** - -- Upgrade: Upgrade to newest version via command line. -- SQL Support: Added `IS NULL` for SQL parser. -- Added attribute validation to search operations. - -**Fixes** - -- Fixed `SELECT` calculations, i.e. `SELECT` 2+2. -- Fixed select OR not returning expected results. -- No longer allowing reserved words for schema and table names. -- Corrected process interruptions from improper SQL statements. -- Improved message handling between spawned processes that replace killed processes. -- Enhanced error handling for updates to tables that do not exist. -- Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes. -- Fixed issue with new columns not being updated properly in update statements. -- Now validating roles, tables and attributes when creating or updating roles. 
-- Fixed an issue where in some cases `undefined` was being returned after dropping a role diff --git a/docs/technical-details/release-notes/1.alby/1.3.1.md b/docs/technical-details/release-notes/1.alby/1.3.1.md deleted file mode 100644 index c0190f86..00000000 --- a/docs/technical-details/release-notes/1.alby/1.3.1.md +++ /dev/null @@ -1,24 +0,0 @@ -### HarperDB 1.3.1, Alby Release - -2/26/2019 - -**Features** - -- Clustering connection direction appointment -- Foundations for threading/multi processing -- UUID autogen for hash attributes that were not provided -- Added cluster status operation - -**Bug Fixes and Enhancements** - -- More logging -- Clustering communication enhancements -- Clustering queue ordering by timestamps -- Cluster re connection enhancements -- Number of system core(s) detection -- Node LTS (10.15) compatibility -- Update/Alter users enhancements -- General performance enhancements -- Warning is logged if different versions of harperdb are connected via clustering -- Fixed need to restart after user creation/alteration -- Fixed SQL error that occurred on selecting from an empty table diff --git a/docs/technical-details/release-notes/1.alby/README.md b/docs/technical-details/release-notes/1.alby/README.md deleted file mode 100644 index 59047eb3..00000000 --- a/docs/technical-details/release-notes/1.alby/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# HarperDB Alby (Version 1) - -Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup. - -Here is a bit about Alby: - -![picture of black dog](../../../../images/dogs/alby.webp) - -_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sun bathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_ diff --git a/docs/technical-details/release-notes/2.penny/2.1.1.md b/docs/technical-details/release-notes/2.penny/2.1.1.md deleted file mode 100644 index eeaedf40..00000000 --- a/docs/technical-details/release-notes/2.penny/2.1.1.md +++ /dev/null @@ -1,23 +0,0 @@ -### HarperDB 2.1.1, Penny Release - -05/22/2020 - -**Highlights** - -- CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values. -- CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to alias in `ORDER BY` statement. -- CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values. -- CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas. -- CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function. -- CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except, forward slashes and backticks. Same rules apply now for hash attribute values. -- CORE-1003 Fixed handling of ORDER BY statements with function aliases. -- CORE-1004 Fixed bug related to `SELECT*` on `JOIN` queries with table columns with the same name. -- CORE-996 Fixed an issue where the `transact_to_cluster` flag is lost for CSV URL loads, fixed an issue where new attributes created in CSV bulk load do not sync to the cluster. -- CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, cpu, disk, network. 
-- CORE-993 Added new custom date functions for AlaSQL & UTC updates. -- CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process. -- CORE-992 HTTPS enabled by default. -- CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage. -- CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime. -- CORE-975 Updated time values set by SQL Date Functions to be in epoch format. -- CORE-974 Added date functions to `SQL SELECT` column alias functionality. diff --git a/docs/technical-details/release-notes/2.penny/2.2.0.md b/docs/technical-details/release-notes/2.penny/2.2.0.md deleted file mode 100644 index 5888912e..00000000 --- a/docs/technical-details/release-notes/2.penny/2.2.0.md +++ /dev/null @@ -1,39 +0,0 @@ -### HarperDB 2.2.0, Penny Release - -08/24/2020 - -**Features/Updates** - -- CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up less resources -- CORE-1018 Adds SQL functionality for `BETWEEN` statements -- CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation -- CORE-1036 On create/drop table we auto create/drop the related transactions environments for the schema.table -- CORE-1042 Built raw functions to write to a tables transaction log for insert/update/delete operations -- CORE-1057 Implemented write transaction into lmdb create/update/delete functions -- CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards -- CORE-1059 Added config setting to disable transaction logging for an instance -- CORE-1076 Adds permissions filter to describe operations -- CORE-1043 Change clustering catchup to use the new transaction log -- CORE-1052 Removed word "master" from source -- CORE-1061 Added new operation called `delete_transactions_before` this will tail a transaction log for a specific schema / table -- CORE-1040 On HarperDB startup make sure all tables have a transaction environment -- CORE-1055 Added 2 new setting to change the server headersTimeout & keepAliveTimeout from the config file -- CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value` -- CORE-1043 Change clustering catchup to use the new transaction log -- CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count -- CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL. -- CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions` -- CORE-1092 Do not allow the `__createdtime__` to be updated -- CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate -- CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found -- CORE-1049 Updates response message for SQL update op with no matching rows -- CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user. 
- -**Bug Fixes** - -- CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query -- CORE-1037 Fixed an issue where numbers with leading zero i.e. 00123 are converted to numbers rather than being honored as strings. -- CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo -- CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped. -- CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records. -- CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause diff --git a/docs/technical-details/release-notes/2.penny/2.2.2.md b/docs/technical-details/release-notes/2.penny/2.2.2.md deleted file mode 100644 index 46e822b0..00000000 --- a/docs/technical-details/release-notes/2.penny/2.2.2.md +++ /dev/null @@ -1,12 +0,0 @@ -### HarperDB 2.2.2, Penny Release - -10/27/2020 - -- CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled. -- CORE-1153 Fixed issue where `delete_files_before` was writing to transaction log. -- CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created. -- CORE-1112 Adds handling for system timestamp attributes in permissions. -- CORE-1131 Adds better handling for checking perms on operations with action value in JSON. -- CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions. -- CORE-1135 Adds validation for valid keys in role API operations. -- CORE-1073 Adds new `import_from_s3` operation to API. diff --git a/docs/technical-details/release-notes/2.penny/2.2.3.md b/docs/technical-details/release-notes/2.penny/2.2.3.md deleted file mode 100644 index aadf55d9..00000000 --- a/docs/technical-details/release-notes/2.penny/2.2.3.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 2.2.3, Penny Release - -11/16/2020 - -- CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause into between. diff --git a/docs/technical-details/release-notes/2.penny/2.3.0.md b/docs/technical-details/release-notes/2.penny/2.3.0.md deleted file mode 100644 index e3c7a724..00000000 --- a/docs/technical-details/release-notes/2.penny/2.3.0.md +++ /dev/null @@ -1,18 +0,0 @@ -### HarperDB 2.3.0, Penny Release - -12/03/2020 - -**Features/Updates** - -- CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information) -- CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation -- CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings. -- CORE-1175 Added new internal LMDB function to copy an environment for use in future features. -- CORE-1166 Updated packages to address security vulnerabilities. - -**Bug Fixes** - -- CORE-1195 Modified `drop_attribute` to drop after data cleanse completes. -- CORE-1149 Fix SQL bug regarding self joins and updates alasql to 0.6.5 release. -- CORE-1168 Fix inconsistent invalid schema/table errors. -- CORE-1162 Fix bug which caused `delete_files_before` to cause tables to grow in size due to an open cursor issue. 
diff --git a/docs/technical-details/release-notes/2.penny/2.3.1.md b/docs/technical-details/release-notes/2.penny/2.3.1.md deleted file mode 100644 index 5877b3ba..00000000 --- a/docs/technical-details/release-notes/2.penny/2.3.1.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 2.3.1, Penny Release - -1/29/2021 - -**Bug Fixes** - -- CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`. -- CORE-1219 Increased maximum table attributes from 1000 to 10000 diff --git a/docs/technical-details/release-notes/2.penny/README.md b/docs/technical-details/release-notes/2.penny/README.md deleted file mode 100644 index 904477f9..00000000 --- a/docs/technical-details/release-notes/2.penny/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# HarperDB Penny (Version 2) - -Did you know our release names are dedicated to employee pups? For our second release, Penny was the star. - -Here is a bit about Penny: - -![picture of brindle dog](../../../../images/dogs/penny.webp) - -_Hi I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._ diff --git a/docs/technical-details/release-notes/3.monkey/3.0.0.md b/docs/technical-details/release-notes/3.monkey/3.0.0.md deleted file mode 100644 index 43376fc8..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.0.0.md +++ /dev/null @@ -1,27 +0,0 @@ -### HarperDB 3.0, Monkey Release - -5/18/2021 - -**Features/Updates** - -- CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation. -- CORE-1304 Upgrade to Node 12.22.1. -- CORE-1235 Adds new upgrade/install functionality. -- CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance. -- CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`. -- CORE-1243 Change `HTTPS_ON` settings value to false by default. -- CORE-1189 Implement fastify web server, resulting in improved performance. -- CORE-1221 Update user API to use role name instead of role id. -- CORE-1225 Updated dependencies to eliminate npm security warnings. -- CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality. - -**Bug Fixes** - -- CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB. -- CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log. -- CORE-1285 Update warning log when trying to create an attribute that already exists. -- CORE-1254 Added logic to manage data collisions in clustering. -- CORE-1212 Add pre-check to `drop_user` that returns error if user doesn't exist. 
-- CORE-1114 Update response code and message from `add_user` when user already exists. -- CORE-1111 Update response from `create_attribute` to match the create schema/table response. -- CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update. diff --git a/docs/technical-details/release-notes/3.monkey/3.1.0.md b/docs/technical-details/release-notes/3.monkey/3.1.0.md deleted file mode 100644 index 4a41daf4..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.1.0.md +++ /dev/null @@ -1,19 +0,0 @@ -### HarperDB 3.1.0, Monkey Release - -8/24/2021 - -**Features/Updates** - -- CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions -- CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions -- CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management -- CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments -- CORE-1310 Updated licensing functionality -- CORE-1301 Updated validation for performance improvement -- CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds -- CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES` - -**Bug Fixes** - -- CORE-1315 Corrected issue in HarperDB restart scenario -- CORE-1370 Update some of the validation error handlers so that they don't log full stack diff --git a/docs/technical-details/release-notes/3.monkey/3.1.1.md b/docs/technical-details/release-notes/3.monkey/3.1.1.md deleted file mode 100644 index 4733f103..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.1.1.md +++ /dev/null @@ -1,14 +0,0 @@ -### HarperDB 3.1.1, Monkey Release - -9/23/2021 - -**Features/Updates** - -- CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart -- CORE-1395 Create a setting which will allow to enable the local Studio to be served from an instance of HarperDB -- CORE-1397 Update the stock 404 response to not return the request URL -- General updates to optimize Docker container - -**Bug Fixes** - -- CORE-1399 Added fixes for complex SQL alias issues diff --git a/docs/technical-details/release-notes/3.monkey/3.1.2.md b/docs/technical-details/release-notes/3.monkey/3.1.2.md deleted file mode 100644 index d07d9993..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.1.2.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 3.1.2, Monkey Release - -10/21/2021 - -**Features/Updates** - -- Updated the installation ASCII art to reflect the new HarperDB logo - -**Bug Fixes** - -- CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number causing tables to behave unexpectedly diff --git a/docs/technical-details/release-notes/3.monkey/3.1.3.md b/docs/technical-details/release-notes/3.monkey/3.1.3.md deleted file mode 100644 index 72c1ba84..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.1.3.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 3.1.3, Monkey Release - 
-1/14/2022 - -**Bug Fixes** - -- CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return diff --git a/docs/technical-details/release-notes/3.monkey/3.1.4.md b/docs/technical-details/release-notes/3.monkey/3.1.4.md deleted file mode 100644 index f4f94715..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.1.4.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 3.1.4, Monkey Release - -2/24/2022 - -**Features/Updates** - -- CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe. diff --git a/docs/technical-details/release-notes/3.monkey/3.1.5.md b/docs/technical-details/release-notes/3.monkey/3.1.5.md deleted file mode 100644 index 9873984a..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.1.5.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 3.1.5, Monkey Release - -3/4/2022 - -**Features/Updates** - -- CORE-1498 Fixed incorrect autocasting of string that start with "0." that tries to convert to number but instead returns NaN. diff --git a/docs/technical-details/release-notes/3.monkey/3.2.0.md b/docs/technical-details/release-notes/3.monkey/3.2.0.md deleted file mode 100644 index a76c0ec2..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.2.0.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 3.2.0, Monkey Release - -3/25/2022 - -**Features/Updates** - -- CORE-1391 Bug fix related to orphaned HarperDB background processes. -- CORE-1509 Updated node version check, updated Node.js version, updated project dependencies. -- CORE-1518 Remove final call from logger. diff --git a/docs/technical-details/release-notes/3.monkey/3.2.1.md b/docs/technical-details/release-notes/3.monkey/3.2.1.md deleted file mode 100644 index b2518e56..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.2.1.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 3.2.1, Monkey Release - -6/1/2022 - -**Features/Updates** - -- CORE-1573 Added logic to track the pid of the foreground process if running in foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called. diff --git a/docs/technical-details/release-notes/3.monkey/3.3.0.md b/docs/technical-details/release-notes/3.monkey/3.3.0.md deleted file mode 100644 index b3769195..00000000 --- a/docs/technical-details/release-notes/3.monkey/3.3.0.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 3.3.0 - Monkey - -- CORE-1595 Added new role type `structure_user`, this enables non-superusers to be able to create/drop schema/table/attribute. -- CORE-1501 Improved performance for drop_table. -- CORE-1599 Added two new operations for custom functions `install_node_modules` & `audit_node_modules`. -- CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`. -- CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, Python are no longer required when installing on these platforms. 
diff --git a/docs/technical-details/release-notes/3.monkey/README.md b/docs/technical-details/release-notes/3.monkey/README.md deleted file mode 100644 index e5431a1a..00000000 --- a/docs/technical-details/release-notes/3.monkey/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# HarperDB Monkey (Version 3) - -Did you know our release names are dedicated to employee pups? For our third release, we have Monkey. - -![picture of tan dog](../../../../images/dogs/monkey.webp) - -_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to her home. I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_ diff --git a/docs/technical-details/release-notes/4.tucker/4.0.0.md b/docs/technical-details/release-notes/4.tucker/4.0.0.md deleted file mode 100644 index 6c8f5f9c..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.0.md +++ /dev/null @@ -1,126 +0,0 @@ -### HarperDB 4.0.0, Tucker Release - -11/2/2022 - -**Networking & Data Replication (Clustering)** - -The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise grade connective technology responsible for addressing, discovery and exchanging of messages that drive the common patterns in distributed systems. - -- CORE-1464, CORE-1470, : Remove SocketCluster dependencies and all code related to them. -- CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE_1665, CORE-1710, CORE-1801, CORE-1865 :Add nats-`server` code as dependency, on install of HarperDB download nats-`server` is possible else fallback to building from source code. -- CORE-1593, CORE-1761: Add `nats.js` as project dependency. -- CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration. -- CORE-1467, CORE-1508: Launch and manage NATS servers with PM2. -- CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions. -- CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs. -- CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS. -- CORE-1504: Update existing hooks to save transactions to work with NATS. -- CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now manage dynamically sourcing of table level transaction streams between nodes and work queues. -- CORE-1522: Create `NATSReplyService` process which handles the receiving NATS based requests from remote instances and sending back appropriate responses. -- CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation. -- CORE-1611: Update pre-existing transaction log operations to be audit log operations. -- CORE-1541, CORE-1612, CORE-1613: Create translation log operations which interface with streams. -- CORE-1668: Update NATS serialization / deserialization to use MessagePack. -- CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`. 
-- CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & just clustering restart call the NATS server with a reload directive to maintain full uptime while servers refresh. -- CORE-1474:HarperDB install adds clustering folder structure. -- CORE-1530: Post `drop_table` HarperDB purges the related transaction stream. -- CORE-1567: Set NATS config to always use TLS. -- CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate. -- CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`, this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB. -- CORE-1535: Create work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order. -- CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`. -- CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exact once delivery. -- CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not. -- CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time to start synchronizing transactions. -- CORE-1785: Optimizations and bug fixes in regards to sourcing data from remote instances on HarperDB. -- CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together. -- CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh. -- CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh. -- CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to new hub server port. -- CORE-1609: Allow `remove_node` to be called when the other node cannot be reached. -- CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid concurrent nats source update bug. -- CORE-1848: Update stream configs if the node name has been changed in the YAML configuration. -- CORE-1873: Update `add_node` and `update_node` so that it auto-creates schema/table on both local and remote node respectively - -**Data Storage** - -We have made improvements to how we store, index, and retrieve data. - -- CORE-1619: Enabled new concurrent flushing technology for improved write performance. -- CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions. -- CORE-1652: Encode the values of secondary indices more efficiently for faster access. -- CORE-1670: Store updated timestamp in `lmdb.js`' version property. -- CORE-1651: Enabled multiple value indexing of array values which allows for the ability to search on specific elements in an array more efficiently. -- CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in separate blob index. Now they are segmented and delimited in the same index to increase search performance. -- Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput. -- CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values. 
-- CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with get_attributes) without incurring the cost of decoding the entire object. -- CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data. -- CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates. -- CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements. -- CORE-1753: Removed old `node-lmdb` dependency. -- CORE-1787: Freeze objects returned from queries. -- CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync. - -**Logging** - -HarperDB has increased logging specificity by breaking out logs based on components logging. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more. - -- CORE-1497: Remove `pino` and `winston` dependencies. -- CORE-1426: All logging is output via `stdout` and `stderr`, our default logging is then picked up by PM2 which handles writing out to file. -- CORE-1431: Improved `read_log` operation validation. -- CORE-1433, CORE-1463: Added log rotation. -- CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration. -- CORE-1436: Upgrade to 4.0 updates internals for logging changes. -- CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests. -- CORE-1691: Convert non-PM2 managed log file writes to use Node.js `fs.appendFileSync` function. - -**Configuration** - -HarperDB has updated its configuration from a properties file to YAML. - -- CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML. -- CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file. -- Installer has updated prompts to reflect YAML settings. -- CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`. -- CORE-1461, CORE-1462, CORE-1483: Unit test improvements. -- CORE-1492: Improvements to get_configuration and set_configuration operations. -- CORE-1503: Modify HarperDB configuration for more granular certificate definition. -- CORE-1591: Update `routes` IP param to `host` and to `leaf` config in `harperdb.conf` -- CORE-1519: Fix issue when switching between old and new versions of HarperDB we are getting the config parameter is undefined error on npm install. - -**Broad NodeJS and Platform Support** - -- CORE-1624: HarperDB can now run on multiple versions of NodeJS, from v14 to v19. We primarily test on v18, so that is the preferred version. - -**Windows 10 and 11** - -- CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or installed in WSL. Windows is only intended for evaluation and development purposes, not for production work loads. - -**Extra Changes and Bug Fixes** - -- CORE-1520: Refactor installer to remove all waterfall code and update to use Promises. -- CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb. -- CORE-1586: When HarperDB is running in foreground stop any additional logging processes from being spawned. -- CORE-1626: Update docker file to accommodate new `harperdb.conf` file. 
-- CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869:Changes for CI/CD pipeline and integration tests. -- CORE-1661: Fixed issue where old boot properties file caused an error when attempting to install 4.0.0. -- CORE-1697, CORE-1814, CORE-1855: Upgrade fastify dependency to new major version 4. -- CORE-1629: Jobs are now running as processes managed by the PM2 daemon. -- CORE-1733: Update LICENSE to reflect our EULA on our site. -- CORE-1606: Enable Custom Functions by default. -- CORE-1714: Include pre-built binaries for most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64). -- CORE-1628: Fix issue where setting license through environment variable not working. -- CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements. -- CORE-1706: Add support for encoding HTTP responses with MessagePack. -- CORE-1709: Improve the way lmdb.js dependencies are installed. -- CORE-1758: Remove/update unnecessary HTTP headers. -- CORE-1756: On `npm install` and `harperdb install` change the node version check from an error to a warning if the installed Node.js version does not match our preferred version. -- CORE-1791: Optimizations to authenticated user caching. -- CORE-1794: Update README to discuss Windows support & Node.js versions -- CORE-1837: Fix issue where Custom Function directory was not being created on install. -- CORE-1742: Add more validation to audit log - check schema/table exists and log is enabled. -- CORE-1768: Fix issue where when running in foreground HarperDB process is not stopping on `harperdb stop`. -- CORE-1864: Fix to semver checks on upgrade. -- CORE-1850: Fix issue where a `cluster_user` type role could not be altered. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.1.md b/docs/technical-details/release-notes/4.tucker/4.0.1.md deleted file mode 100644 index ba7e3d70..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.1.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.0.1, Tucker Release - -01/20/2023 - -**Bug Fixes** - -- CORE-1992 Local studio was not loading because the path got mangled in the build. -- CORE-2001 Fixed deploy_custom_function_project after node update broke it. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.2.md b/docs/technical-details/release-notes/4.tucker/4.0.2.md deleted file mode 100644 index 34e86018..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.2.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.0.2, Tucker Release - -01/24/2023 - -**Bug Fixes** - -- CORE-2003 Fix bug where if machine had one core thread config would default to zero. -- Update to lmdb 2.7.3 and msgpackr 1.7.0 diff --git a/docs/technical-details/release-notes/4.tucker/4.0.3.md b/docs/technical-details/release-notes/4.tucker/4.0.3.md deleted file mode 100644 index cd987f0b..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.3.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.0.3, Tucker Release - -01/26/2023 - -**Bug Fixes** - -- CORE-2007 Add update nodes 4.0.0 launch script to build script to fix clustering upgrade. 
diff --git a/docs/technical-details/release-notes/4.tucker/4.0.4.md b/docs/technical-details/release-notes/4.tucker/4.0.4.md deleted file mode 100644 index 4dea9ee1..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.4.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.0.4, Tucker Release - -01/27/2023 - -**Bug Fixes** - -- CORE-2009 Fixed bug where add node was not being called when upgrading clustering. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.5.md b/docs/technical-details/release-notes/4.tucker/4.0.5.md deleted file mode 100644 index 83a2a2c5..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.5.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.0.5, Tucker Release - -02/15/2023 - -**Bug Fixes** - -- CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to upgrade to determine if new certificates should be created or existing certificates should be kept/used. -- Fix the way NATS connections are honored in a local environment. -- Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.6.md b/docs/technical-details/release-notes/4.tucker/4.0.6.md deleted file mode 100644 index bb696c3b..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.6.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.0.6, Tucker Release - -03/09/2023 - -**Bug Fixes** - -- Fixed a data serialization error that occurs when a large number of different record structures are persisted in a single table. diff --git a/docs/technical-details/release-notes/4.tucker/4.0.7.md b/docs/technical-details/release-notes/4.tucker/4.0.7.md deleted file mode 100644 index dfd135bd..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.0.7.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.0.7, Tucker Release - -03/10/2023 - -**Bug Fixes** - -- Update lmdb.js dependency diff --git a/docs/technical-details/release-notes/4.tucker/4.1.0.md b/docs/technical-details/release-notes/4.tucker/4.1.0.md deleted file mode 100644 index fde09ef7..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.1.0.md +++ /dev/null @@ -1,58 +0,0 @@ -# 4.1.0 - -HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation with support for optimized load tracking and session affinity, better debuggability, and reduced memory footprint. - -This means debugging will be much easier for custom functions. If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, and set breakpoints in your custom functions and debug them. - -The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread which can improve caching locality, performance, and fairness. This can be enabled in with the [`http.sessionAffinity` option in your configuration](../../../deployments/configuration.md#http). - -HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory efficient mechanism for directly streaming query results to the network _as_ the query results are computed. 
This results in faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent), and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and can provide means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade, all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or for-in loops to handle query results). - -4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../../deployments/configuration.md#storage) and [schemas configuration](../../../../deployments/configuration.md#schemas) for information on how to configure these locations. - -Logging has been revamped and condensed into one `hdb.log` file. See [logging](../../../administration/logging/) for more information. - -A new operation called `cluster_network` was added, this operation will ping the cluster and return a list of enmeshed nodes. - -Custom Functions will no longer automatically load static file routes, instead the `@fastify/static` plugin will need to be registered with the Custom Function server. See [Host A Static Web UI-static](https://docs.harperdb.io/docs/v/4.1/custom-functions/host-static). - -Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object it should be done in the `key` parameter. See examples [here](../../../developers/operations-api/bulk-operations.md#import-from-s3). - -Due to the AWS SDK v2 reaching end of life support we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3`: - -- A new attribute `region` will need to be supplied -- The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`. - -Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard unix tooling for interacting with the process and is conducive for running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`. 
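To make the iterator behavior above concrete, a small sketch (the generator below is only a stand-in for a real query result; actual results come from the database, but the access pattern is the point):

```javascript
// Stand-in for a lazy query result: 4.1 returns iterator-like results
// rather than Arrays, so records stream as they are computed.
function* queryResults() {
	yield { id: 1, name: 'Harper' };
	yield { id: 2, name: 'Tucker' };
}

const results = queryResults();
// Iterate instead of indexing; `results[0]` is not available on an iterator.
for (const record of results) {
	console.log(record.id, record.name);
}
// When random access is genuinely required, materialize explicitly, at the
// cost of the memory that iterator-based queries otherwise avoid:
// const all = Array.from(queryResults());
```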
- -Internal Tickets completed: - -- CORE-609 - Ensure that attribute names are always added to global schema as Strings -- CORE-1549 - Remove fastify-static code from Custom Functions server which auto serves content from "static" folder -- CORE-1655 - Iterator based queries -- CORE-1764 - Fix issue where describe_all operation returns an empty object for non super-users if schema(s) do not yet have table(s) -- CORE-1854 - Switch to using worker threads instead of processes for handling concurrency -- CORE-1877 - Extend the csv_url_load operation to allow for additional headers to be passed to the remote server when the csv is being downloaded -- CORE-1893 - Add last updated timestamp to describe operations -- CORE-1896 - Fix issue where Select \* from system.hdb_info returns wrong HDB version number after Instance Upgrade -- CORE-1904 - Fix issue when executing GEOJSON query in SQL -- CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams -- CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. -- CORE-1655 - Streaming binary format serialization -- CORE-1943 - Add configuration option to set mount point for audit tables -- CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. -- CORE-1963 - Update logging for better readability, reduced duplication, and request context information. -- CORE-1968 - In server\nats\natsIngestService.js remove the js_msg.working(); line to improve performance. -- CORE-1976 - Fix error when calling describe_table operation with no schema or table defined in payload. -- CORE-1983 - Fix issue where create_attribute operation does not validate request for required attributes -- CORE-2015 - Remove PM2 logs that get logged in console when starting HDB -- CORE-2048 - systemd script for 4.1 -- CORE-2052 - Include thread information in system_information for visibility of threads -- CORE-2061 - Add a better error msg when clustering is enabled without a cluster user set -- CORE-2068 - Create new log rotate logic since pm2 log-rotate no longer used -- CORE-2072 - Update to Node 18.15.0 -- CORE-2090 - Upgrade Testing from v4.0.x and v3.x to v4.1. -- CORE-2091 - Run the performance tests -- CORE-2092 - Allow for automatic patch version updates of certain packages -- CORE-2109 - Add verify option to clustering TLS configuration -- CORE-2111 - Update AWS SDK to v3 diff --git a/docs/technical-details/release-notes/4.tucker/4.1.1.md b/docs/technical-details/release-notes/4.tucker/4.1.1.md deleted file mode 100644 index 2da797d1..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.1.1.md +++ /dev/null @@ -1,10 +0,0 @@ -# 4.1.1 - -06/16/2023 - -- HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration.md). -- An option has been added for [disabling the republishing NATS messages](../../../deployments/configuration.md), which can provide improved replication performance in a fully connected network. -- Improvements to our OpenShift container. -- Dependency security updates. 
-- **Bug Fixes** -- Fixed a bug in reporting database metrics in the `system_information` operation. diff --git a/docs/technical-details/release-notes/4.tucker/4.1.2.md b/docs/technical-details/release-notes/4.tucker/4.1.2.md deleted file mode 100644 index aabb838d..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.1.2.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.1.2, Tucker Release - -06/16/2023 - -- HarperDB has updated binary dependencies to support older glibc versions back 2.17. -- A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`. -- Improvements to our OpenShift container. -- Dependency security updates. diff --git a/docs/technical-details/release-notes/4.tucker/4.2.0.md b/docs/technical-details/release-notes/4.tucker/4.2.0.md deleted file mode 100644 index 500b1828..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.0.md +++ /dev/null @@ -1,94 +0,0 @@ -# 4.2.0 - -#### HarperDB 4.2.0 - -HarperDB 4.2 introduces a new interface to accessing our core database engine with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 also had adopted a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities including a new REST interface, advanced caching, real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events. - -4.2 also introduces configurable database schemas, using GraphQL Schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting starting guide](/docs/4.2/getting-started) to see how easy it is to get started with HarperDB apps. - -### Resource API - -The [Resource API](/docs/4.2/technical-details/reference/resource) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB database/tables and is designed to easily be implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real-time. The [Resource API documentation details this interface](/docs/4.2/technical-details/reference/resource). - -### Component Architecture - -HarperDB's custom functions have evolved towards a [full component architecture](/docs/4.2/technical-details/reference/components/); our internal functionality is defined as components, and this can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a [well-defined interface for creating your own components](/docs/4.2/technical-details/reference/components/extensions). Components can easily be deployed/installed into HarperDB using [NPM and Github references as well](/docs/4.2/technical-details/reference/components/configuration). - -### Configurable Database Schemas - -HarperDB applications or components support [schema definitions using GraphQL schema syntax](/docs/4.2/developers/applications/defining-schemas). 
This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with application code. - -### REST Interface - -HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs and more. With standards-based header support built-in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers. - -### Real-Time - -HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real-time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database style data updates and querying along with message delivery. [Real-time messaging](/docs/4.2/developers/real-time) of data is available through several protocols: - -#### MQTT - -4.2 now includes MQTT support which is a publish and subscribe messaging protocol, designed for efficiency (designed to be efficient enough for even small Internet of Things devices). This allows clients to connect to HarperDB and publish messages through our data center and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions. - -#### WebSockets - -HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling. - -#### Server-Sent Events - -HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows web sites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP. - -### Database Structure - -HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally-consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are also maintained in the same database with atomic consistency as well. - -Databases are now entirely encapsulated in a file, which means they can be moved/copied to another database without requiring any separate metadata updates in the system tables. - -### Clone Node - -HarperDB includes new functionality for adding new HarperDB nodes in a cluster. New instances can be configured to clone from a leader node, performing and copying a database snapshot from a leader node, and self-configuring from the leader node as well, to facilitate accelerated deployment of new nodes for fast horizontal scaling to meet demand needs. [See the documentation on Clone Node for more information.](/docs/4.2/administration/cloning) - -### Operations API terminology updates - -Any operation that used the `schema` property was updated to make this property optional and alternately support `database` as the property for specifying the database (formerly 'schema'). 
If both `schema` and `database` are absent, operation defaults to using the `data` database. Term 'primary key' now used in place of 'hash'. noSQL operation `search_by_hash` updated to `search_by_id`. - -Support was added for defining a table with `primary_key` instead of `hash_attribute`. - -## Configuration - -There have been significant changes to `harperdb-config.yaml`, however none of these changes should affect pre-4.2 versions. If you upgrade to 4.2 any existing configuration should be backwards compatible and will not need to be updated. - -`harperdb-config.yaml` has had some configuration values added, removed, renamed and defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration.md) for the most current configuration parameters. - -- The `http` element has been expanded. - - `compressionThreshold` was added. - - All `customFunction` configuration now lives here, except for the `tls` section. -- `threads` has moved out of the `http` element and now is its own top level element. -- `authentication` section was moved out of the `operationsApi` section and is now its own top level element/section. -- `analytics.aggregatePeriod` was added. -- Default logging level was changed to `warn`. -- Default clustering log level was changed to `info`. -- `clustering.republishMessages` now defaults to `false`. -- `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`. -- Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section. -- Added a `securePort` parameter to `operationsApi` and `http` used for setting the https port. -- Added a new top level `tls` section. -- Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`. -- Added an element called `componentRoot` which replaces `customFunctions.root`. -- Updated custom pathing to use `databases` instead of `schemas`. -- Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events. -- A new `mqtt` section was added. - -### Socket Management - -HarperDB now uses socket sharing to distribute incoming connections to different threads (`SO_REUSEPORT`). This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity based socket delegation. - -HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single port configuration. - -### Sessions - -HarperDB now supports cookie-based sessions for authentication for web clients. This can be used with the standard authentication mechanisms to login, and then cookies can be used to preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials. - -### Dev Mode - -HarperDB can now directly run a HarperDB application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting with any changes in your application files. Dev mode is recommended for local application and component development. 
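As a rough illustration of the REST interface and caching headers described above (a hypothetical `Dog` table and record id; `localhost:9926` reflects the default application port mentioned above, and ETag revalidation is assumed to be among the standards-based headers):

```javascript
// GET a record by primary key through the REST interface.
const first = await fetch('http://localhost:9926/Dog/1', {
	headers: { Accept: 'application/json' },
});
console.log(await first.json());

// Revalidate using the ETag, so an unchanged record costs no body transfer.
const etag = first.headers.get('ETag');
const second = await fetch('http://localhost:9926/Dog/1', {
	headers: { 'If-None-Match': etag ?? '' },
});
console.log(second.status); // expect 304 Not Modified if the record is unchanged
```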
diff --git a/docs/technical-details/release-notes/4.tucker/4.2.1.md b/docs/technical-details/release-notes/4.tucker/4.2.1.md deleted file mode 100644 index acc4f8cf..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.1.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.2.1, Tucker Release - -11/3/2023 - -- Downgrade NATS 2.10.3 back to 2.10.1 due to regression in connection handling. -- Handle package names with underscores. -- Improved validation of queries and comparators -- Avoid double replication on transactions with multiple commits -- Added file metadata on get_component_file diff --git a/docs/technical-details/release-notes/4.tucker/4.2.2.md b/docs/technical-details/release-notes/4.tucker/4.2.2.md deleted file mode 100644 index fa9b8e36..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.2.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 4.2.2, Tucker Release - -11/8/2023 - -- Increase timeouts for NATS connections. -- Fix for database snapshots for backups (and for clone node). -- Fix application of permissions for default tables exposed through REST. -- Log replication failures with record information. -- Fix application of authorization/permissions for MQTT commands. -- Fix copying of local components in clone node. -- Fix calculation of overlapping start time in clone node. diff --git a/docs/technical-details/release-notes/4.tucker/4.2.3.md b/docs/technical-details/release-notes/4.tucker/4.2.3.md deleted file mode 100644 index b064310b..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.3.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.2.3, Tucker Release - -11/15/2023 - -- When setting securePort, disable the insecure port setting on the same port -- Fix `harperdb status` when pid file is missing -- Fix/include missing icons/fonts from local studio -- Fix crash that can occur when concurrently accessing records > 16KB -- Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated diff --git a/docs/technical-details/release-notes/4.tucker/4.2.4.md b/docs/technical-details/release-notes/4.tucker/4.2.4.md deleted file mode 100644 index ae3fa18d..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.4.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.2.4, Tucker Release - -11/16/2023 - -- Prevent coercion of strings to numbers in SQL queries (in WHERE clause) -- Address fastify deprecation warning about accessing config diff --git a/docs/technical-details/release-notes/4.tucker/4.2.5.md b/docs/technical-details/release-notes/4.tucker/4.2.5.md deleted file mode 100644 index 603b5a97..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.5.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.2.5, Tucker Release - -11/22/2023 - -- Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery) -- Update geoNear function to tolerate null values -- lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved -- Add header to indicate start of a new authenticated session (for studio to identify authenticated sessions) diff --git a/docs/technical-details/release-notes/4.tucker/4.2.6.md b/docs/technical-details/release-notes/4.tucker/4.2.6.md deleted file mode 100644 index fabbe679..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.6.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.2.6, Tucker Release - -11/29/2023 - -- Update various geo SQL functions to tolerate invalid values -- Properly report component installation/load
errors in `get_components` (for studio to load components after an installation failure) diff --git a/docs/technical-details/release-notes/4.tucker/4.2.7.md b/docs/technical-details/release-notes/4.tucker/4.2.7.md deleted file mode 100644 index 58d0069f..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.7.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.2.7 - -12/6/2023 - -- Add support for cloning over the top of an existing HarperDB instance -- Add health checks for NATS consumer with ability to restart consumer loops for better resiliency -- Revert Fastify autoload module due to a regression that had caused ECMAScript modules for Fastify route modules to fail to load on Windows diff --git a/docs/technical-details/release-notes/4.tucker/4.2.8.md b/docs/technical-details/release-notes/4.tucker/4.2.8.md deleted file mode 100644 index 5e88ed45..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.2.8.md +++ /dev/null @@ -1,10 +0,0 @@ -### HarperDB 4.2.8 - -12/19/2023 - -- Added support for CLI command-line arguments for clone node -- Added support for cloning a node without enabling clustering -- Clear NATS client cache on closed event -- Fix check for attribute permissions so that an empty attribute permissions array is treated as a table level permission definition -- Improve speed of cross-node health checks -- Fix for using `database` in describe operations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.0.md b/docs/technical-details/release-notes/4.tucker/4.3.0.md deleted file mode 100644 index f08c7067..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.0.md +++ /dev/null @@ -1,120 +0,0 @@ -# 4.3.0 - -#### HarperDB 4.3.0, Tucker Release - -3/19/2024 - -#### Relationships and Joins - -HarperDB now supports defining relationships between tables. These relationships can be defined as one-to-many, many-to-one, or many-to-many, and use a foreign key to record the relationship between records from different tables. An example of how to use this to define many-to-one and one-to-many relationships between a product and a brand table: - -```graphql -type Product @table { - id: ID @primaryKey - name: String @indexed - # foreign key used to reference a brand - brandId: ID @indexed - # many-to-one relationship to brand - brand: Brand @relation(from: "brandId") -} -type Brand @table { - id: ID @primaryKey - name: String @indexed - # one-to-many relationship of brand to products of that brand - products: [Product] @relation(to: "brandId") -} -``` - -This relationship model can be used in queries and selects, which will automatically "join" the data from the tables. For example, you could search for products by brand name: - -```http -/Product?brand.name=Microsoft -``` - -HarperDB also now supports querying with a sort order. Multiple sort orders can be provided to break ties. Nested selects have also been added, which also utilize joins when related records are referenced. For example: - -```http -/Product?brand.name=Microsoft&sort(price)&select(name,brand{name,size}) -``` - -See the [schema definition documentation](/docs/4.3/developers/applications/defining-schemas) for more information on defining relationships, and the [REST documentation for more information on queries](/docs/4.3/developers/rest). - -#### OpenAPI Specification - -A new default endpoint `GET /openapi` was added for describing endpoints configured through a GraphQL schema.
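As a quick illustration, the specification can be fetched like any other endpoint. This is a minimal sketch assuming a local instance with the REST interface on the default port 9926; the host and port are assumptions, not part of the release notes.

```typescript
// Minimal sketch: retrieve the OpenAPI description of the REST endpoints
// generated from the GraphQL schema. Assumes a local instance on port 9926.
const response = await fetch('http://localhost:9926/openapi');
const spec = await response.json();
// A standard OpenAPI document: list the described paths.
console.log(Object.keys(spec.paths ?? {}));
```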
- -#### Query Optimizations - -HarperDB has also made numerous improvements to query planning and execution for high-performance query results across a broader range of queries. - -#### Indexing Nulls - -New tables and indexes now support indexing null values, enabling queries by null (as well as queries for non-null values). For example, you can query by nulls with the REST interface: - -```http -GET /Table/?attribute=null -``` - -Note that existing indexes will remain without null value indexing, and can only support indexing/querying by nulls if they are rebuilt (removed and re-added). - -#### CLI Expansion - -The HarperDB CLI now supports an expansive set of commands that execute operations from the operations API. For example, you can list users from the command line: - -```bash -harperdb list_users -``` - -#### BigInt Support - -HarperDB now supports `BigInt` attributes/values with integers (with full precision) up to 1000 bits (or 10^301). These can be used as primary keys or standard attributes, and can be used in queries or other operations. Within JSON documents, you can simply use standard JSON integer numbers with up to 300 digits, and large BigInt integers will be returned as standard JSON numbers. - -#### Local Studio Upgrade - -HarperDB has upgraded the local studio to match the same version that is offered on http://studio.harperdb.io. The local studio now has the full robust feature set of the online version. - -### MQTT - -#### mTLS Support - -HarperDB now supports mTLS based authentication for HTTP, WebSockets, and MQTT. See the [configuration documentation for more information](/docs/4.3/deployments/configuration). - -#### Single-Level Wildcards - -HarperDB's MQTT service now supports single-level wildcards (`+`), which facilitates a much greater range of subscriptions. - -#### Retain handling - -HarperDB's MQTT service now supports the retain-handling flags for subscriptions that are made using MQTT v5. - -#### CRDT - -HarperDB now supports basic conflict-free replicated data type (CRDT) updates that allow properties to be individually updated and merged when separate properties are updated on different threads or nodes. Individual property CRDT updates are automatically performed when you update individual properties through the resource API. Individual property CRDT updates are used when making `PATCH` requests through the REST API (see the sketch below). - -The CRDT functionality also supports explicit incrementation to merge multiple parallel incrementation requests with proper summing. See the [Resource API for more information](/docs/4.3/technical-details/reference/resource). - -#### Configuration Improvements - -The configuration has improved support for detecting port conflicts, handling paths for fastify routes, and now includes support for specifying a heap limit and TLS ciphers. See the [configuration documentation for more information](/docs/4.3/deployments/configuration). - -#### Balanced Audit Log Cleanup - -Audit log cleanup has been improved to reduce resource consumption during scheduled cleanups. - -#### `export_*` support for `search_by_conditions` - -The `export_local` and `export_to_s3` operations now support `search_by_conditions` as one of the allowed search operators. - -### Storage Performance Improvements - -Significant improvements were made to handling of free-space to decrease free-space fragmentation and improve performance of reusing free-space for new data. This includes prioritizing reuse of recently released free-space for better memory/caching utilization.
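As referenced in the CRDT section above, here is a minimal TypeScript sketch of a property-level update via a REST `PATCH`. The table name, record id, property, and port are hypothetical placeholders, not part of the release notes.

```typescript
// Minimal sketch: a property-level CRDT update through the REST API.
// Only the listed property is merged into the record, so concurrent updates
// to other properties on different threads or nodes are preserved rather
// than overwritten. `Product`, `123`, and `price` are placeholders.
const response = await fetch('http://localhost:9926/Product/123', {
  method: 'PATCH',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ price: 42.5 }), // update just this property
});
console.log(response.status);
```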
- -#### Compact Database - -In addition to storage improvements, HarperDB now includes functionality for [compacting a database](/docs/4.3/deployments/harper-cli) (while offline), which can be used to eliminate all free-space to reset any fragmentation. - -#### Compression - -Compression is now enabled by default for all records over 4KB. - -To learn more about how to configure compression, visit [configuration](https://docs.harperdb.io/docs/v/4.3/deployments/configuration). diff --git a/docs/technical-details/release-notes/4.tucker/4.3.1.md b/docs/technical-details/release-notes/4.tucker/4.3.1.md deleted file mode 100644 index 53880170..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.1.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.1 - -3/25/2024 - -- Fix Fastify warning about responseTime usage -- Add access to the MQTT topic in the context -- Fix for ensuring local NATS streams are created diff --git a/docs/technical-details/release-notes/4.tucker/4.3.10.md b/docs/technical-details/release-notes/4.tucker/4.3.10.md deleted file mode 100644 index 37a0dd4c..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.10.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.3.10 - -5/5/2024 - -- Provide a `data` property on the request/context with deserialized data from the request body for any request, including methods that don't typically have a request body -- Ensure that CRDTs are not double-applied after committing a transaction -- Delete the MQTT will message after publishing, even if it fails to publish -- Improve transaction retry logic to use async non-optimistic transactions after multiple retries diff --git a/docs/technical-details/release-notes/4.tucker/4.3.11.md b/docs/technical-details/release-notes/4.tucker/4.3.11.md deleted file mode 100644 index e3bd75cd..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.11.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.11 - -5/15/2024 - -- Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS -- Fix warning in Node v22 diff --git a/docs/technical-details/release-notes/4.tucker/4.3.12.md b/docs/technical-details/release-notes/4.tucker/4.3.12.md deleted file mode 100644 index 7732ef38..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.12.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.12 - -5/16/2024 - -- Fix for handling ciphers in multiple certificates -- Allow each certificate config to have multiple hostnames diff --git a/docs/technical-details/release-notes/4.tucker/4.3.13.md b/docs/technical-details/release-notes/4.tucker/4.3.13.md deleted file mode 100644 index 798aa0e0..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.13.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.13 - -5/22/2024 - -- Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available -- Fix for memory leak when a node is down and consumers are trying to reconnect -- Faster cross-thread notification mechanism for transaction events diff --git a/docs/technical-details/release-notes/4.tucker/4.3.14.md b/docs/technical-details/release-notes/4.tucker/4.3.14.md deleted file mode 100644 index 00d0f8e7..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.14.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.14 - -5/24/2024 - -- Fix application of ciphers to multi-certificate TLS configuration diff --git a/docs/technical-details/release-notes/4.tucker/4.3.15.md b/docs/technical-details/release-notes/4.tucker/4.3.15.md deleted file mode
100644 index f845b44d..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.15.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.15 - -5/29/2024 - -- Add support for wildcards in hostnames for SNI -- Properly apply cipher settings on multiple TLS configurations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.16.md b/docs/technical-details/release-notes/4.tucker/4.3.16.md deleted file mode 100644 index 43e61103..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.16.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.16 - -6/3/2024 - -- Properly shim legacy TLS configuration with new multi-certificate support -- Show the changed filenames when an application is reloaded diff --git a/docs/technical-details/release-notes/4.tucker/4.3.17.md b/docs/technical-details/release-notes/4.tucker/4.3.17.md deleted file mode 100644 index 89112db4..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.17.md +++ /dev/null @@ -1,10 +0,0 @@ -### HarperDB 4.3.17 - -6/13/2024 - -- Add MQTT analytics of incoming messages, separated by QoS level -- Ensure that any installed `harperdb` package in components is relinked to the running harperdb. -- Upgrade storage to more efficiently avoid storage increases -- Fix to improve database metrics in system_information -- Fix for pathing on Windows with extension modules -- Add ability to define a range of listening threads diff --git a/docs/technical-details/release-notes/4.tucker/4.3.18.md b/docs/technical-details/release-notes/4.tucker/4.3.18.md deleted file mode 100644 index e9673f04..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.18.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.18 - -6/18/2024 - -- Immediately terminate an MQTT connection when there is a keep-alive timeout. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.19.md b/docs/technical-details/release-notes/4.tucker/4.3.19.md deleted file mode 100644 index 8d493c28..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.19.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.19 - -7/2/2024 - -- Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized. -- Ensure that deploying components empties the target directory for a clean installation and expansion of a `package` sub-directory. -- Ensure that we do not double-load components that are referenced by symlink from node_modules and in the components directory.
diff --git a/docs/technical-details/release-notes/4.tucker/4.3.2.md b/docs/technical-details/release-notes/4.tucker/4.3.2.md deleted file mode 100644 index 00cad16a..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.2.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 4.3.2 - -3/29/2024 - -- Clone node updates to individually clone missing parts -- Fixes for publishing OpenShift container -- Increase purge stream timeout -- Fixed declaration of analytics schema so queries work before a restart -- Fix for iterating queries when deleted records exist -- LMDB stability upgrade -- Fix for cleanup of last will in MQTT diff --git a/docs/technical-details/release-notes/4.tucker/4.3.20.md b/docs/technical-details/release-notes/4.tucker/4.3.20.md deleted file mode 100644 index e0132480..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.20.md +++ /dev/null @@ -1,13 +0,0 @@ -### HarperDB 4.3.20 - -7/11/2024 - -- The restart_service operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) -- Disable Nagle's algorithm for TCP connections to improve performance -- Append Server-Timing header if a fastify route has already added one -- Avoid symlinking the harperdb directory to itself -- Fix for deleting an empty database -- Upgrade ws and pm2 packages for security vulnerabilities -- Improved TypeScript definitions for Resource and Context. -- The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from source -- Better error reporting of MQTT parsing errors and termination of connections for compliance diff --git a/docs/technical-details/release-notes/4.tucker/4.3.21.md b/docs/technical-details/release-notes/4.tucker/4.3.21.md deleted file mode 100644 index 37bbf2bd..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.21.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.3.21 - -8/21/2024 - -- Fixed an issue with iterating/serializing query results with a `limit`. -- Fixed an issue that was preventing the caching of structured records in memory. -- Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context`. -- Fixed logging warnings about license limits after a license is updated. -- Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field. 
diff --git a/docs/technical-details/release-notes/4.tucker/4.3.22.md b/docs/technical-details/release-notes/4.tucker/4.3.22.md deleted file mode 100644 index 04cd71ac..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.22.md +++ /dev/null @@ -1,10 +0,0 @@ -### HarperDB 4.3.22 - -9/6/2024 - -- Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions -- Allow .extension in URL paths to indicate both preferred encoding and decoding -- Added support for multi-part ids in query parameters -- Limit describe calls by time before using statistical sampling -- Proper cleanup of a transaction when it is aborted due to running out of available read transactions -- Updates to release/builds diff --git a/docs/technical-details/release-notes/4.tucker/4.3.23.md b/docs/technical-details/release-notes/4.tucker/4.3.23.md deleted file mode 100644 index 43fab00d..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.23.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.23 - -9/12/2024 - -- Avoid long-running read transactions on subscription catch-ups -- Reverted change to setting default certificate for IP address only -- Better handling of last-will messages on startup diff --git a/docs/technical-details/release-notes/4.tucker/4.3.24.md b/docs/technical-details/release-notes/4.tucker/4.3.24.md deleted file mode 100644 index a953af6b..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.24.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.24 - -9/12/2024 - -- Fix for querying for large strings (over 255 characters) diff --git a/docs/technical-details/release-notes/4.tucker/4.3.25.md b/docs/technical-details/release-notes/4.tucker/4.3.25.md deleted file mode 100644 index dee3f9a7..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.25.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.3.25 - -9/24/2024 - -- Add analytics for replication latency -- Fix iteration issue over asynchronous joined queries -- Local studio fix for loading applications in insecure context (HTTP) -- Local studio fix for loading configuration tab diff --git a/docs/technical-details/release-notes/4.tucker/4.3.26.md b/docs/technical-details/release-notes/4.tucker/4.3.26.md deleted file mode 100644 index 14eaba4f..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.26.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.26 - -9/27/2024 - -- Fixed a security issue that allowed users to bypass access controls with the operations API -- Previously expiration handling was limited to tables with a source, but now it can be applied to any table diff --git a/docs/technical-details/release-notes/4.tucker/4.3.27.md b/docs/technical-details/release-notes/4.tucker/4.3.27.md deleted file mode 100644 index ddec8731..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.27.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.3.27 - -10/2/2024 - -- Fixed handling of HTTP upgrade with a Connection header that does not use Upgrade as the sole value (for Firefox) -- Added metrics for requests by status code -- Properly remove attributes from the stored metadata when removed from the GraphQL schema -- Fixed a regression in clustering retrieval of schema description -- Fix attribute validation/handling to ensure that sequential ids can be assigned with insert/upsert operations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.28.md b/docs/technical-details/release-notes/4.tucker/4.3.28.md deleted file mode 100644 index 51319670..00000000 ---
a/docs/technical-details/release-notes/4.tucker/4.3.28.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.28 - -10/3/2024 - -- Tolerate user with no role when building NATS config -- Change metrics for requests by status code to be prefixed with "response\_" -- Log error `cause`, and other properties, when available. diff --git a/docs/technical-details/release-notes/4.tucker/4.3.29.md b/docs/technical-details/release-notes/4.tucker/4.3.29.md deleted file mode 100644 index 2130c555..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.29.md +++ /dev/null @@ -1,12 +0,0 @@ -### HarperDB 4.3.29 - -10/7/2024 - -- Avoid unnecessary cookie session creation without explicit login -- Added support for caching directives in operations API -- Fixed issue with creating metadata for table with no primary key -- Local studio upgrade: - - Added support for "cache only" mode to view table data without origin resolution - - Added partial support for cookie-based authentication - - Added support for browsing tables with no primary key - - Improved performance for sorting tables diff --git a/docs/technical-details/release-notes/4.tucker/4.3.3.md b/docs/technical-details/release-notes/4.tucker/4.3.3.md deleted file mode 100644 index 656ef8be..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.3.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.3 - -4/01/2024 - -- Improve MQTT logging by properly logging auth failures and disconnections diff --git a/docs/technical-details/release-notes/4.tucker/4.3.30.md b/docs/technical-details/release-notes/4.tucker/4.3.30.md deleted file mode 100644 index e374c19d..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.30.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.30 - -10/9/2024 - -- Properly assign transaction timestamp to writes from cache resolutions (ensuring that latencies can be calculated on replicating nodes) diff --git a/docs/technical-details/release-notes/4.tucker/4.3.31.md b/docs/technical-details/release-notes/4.tucker/4.3.31.md deleted file mode 100644 index f1c66323..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.31.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.31 - -10/10/2024 - -- Reset the restart limit for manual restarts to ensure that the NATS process will continue to restart after more than 10 manual restarts -- Only apply caching directives (from headers) to tables/resources that are configured as caching tables, sourced from another resource -- Catch/tolerate errors on serializing objects for logging diff --git a/docs/technical-details/release-notes/4.tucker/4.3.32.md b/docs/technical-details/release-notes/4.tucker/4.3.32.md deleted file mode 100644 index 558d906a..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.32.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.3.32 - -10/16/2024 - -- Fix a memory leak when cluster_network closes a hub connection -- Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic -- Record analytics and server-timing header even when cache resolution fails diff --git a/docs/technical-details/release-notes/4.tucker/4.3.33.md b/docs/technical-details/release-notes/4.tucker/4.3.33.md deleted file mode 100644 index 1d934f0e..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.33.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.33 - -10/24/2024 - -- Change the default maximum length for a fastify route parameter from 100 to 1000 characters.
diff --git a/docs/technical-details/release-notes/4.tucker/4.3.34.md b/docs/technical-details/release-notes/4.tucker/4.3.34.md deleted file mode 100644 index c3ca47ec..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.34.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.34 - -10/24/2024 - -- lmdb-js upgrade diff --git a/docs/technical-details/release-notes/4.tucker/4.3.35.md b/docs/technical-details/release-notes/4.tucker/4.3.35.md deleted file mode 100644 index 1f8c2073..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.35.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.35 - -11/12/2024 - -- Upgrades for supporting Node.js V23 -- Fix for handling a change in the schema for nested data structures diff --git a/docs/technical-details/release-notes/4.tucker/4.3.36.md b/docs/technical-details/release-notes/4.tucker/4.3.36.md deleted file mode 100644 index 40e8b726..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.36.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.36 - -11/14/2024 - -- lmdb-js upgrade for better free-space management diff --git a/docs/technical-details/release-notes/4.tucker/4.3.37.md b/docs/technical-details/release-notes/4.tucker/4.3.37.md deleted file mode 100644 index 8f067b9c..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.37.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.37 - -12/6/2024 - -- lmdb-js upgrade for preventing crashes with shared user buffers diff --git a/docs/technical-details/release-notes/4.tucker/4.3.38.md b/docs/technical-details/release-notes/4.tucker/4.3.38.md deleted file mode 100644 index 1dde2665..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.38.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.38 - -1/10/2025 - -- Fixes for audit log cleanup diff --git a/docs/technical-details/release-notes/4.tucker/4.3.4.md b/docs/technical-details/release-notes/4.tucker/4.3.4.md deleted file mode 100644 index ee9909ad..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.4.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.3.4 - -4/9/2024 - -- Fixed a buffer overrun issue with decompressing compressed data -- Better keep-alive of transactions with long-running queries diff --git a/docs/technical-details/release-notes/4.tucker/4.3.5.md b/docs/technical-details/release-notes/4.tucker/4.3.5.md deleted file mode 100644 index 04e51594..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.5.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.5 - -4/10/2024 - -- Fixed a buffer overrun issue with decompressing compressed data diff --git a/docs/technical-details/release-notes/4.tucker/4.3.6.md b/docs/technical-details/release-notes/4.tucker/4.3.6.md deleted file mode 100644 index 704640e5..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.6.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.3.6 - -4/12/2024 - -- Fixed parsing of dates from epoch millisecond times in queries -- Fixed CRDT incrementation of different data types -- Adjustments to text/plain content type q-value handling -- Fixed parsing of passwords with a colon -- Added MQTT events for connections, authorization, and disconnections diff --git a/docs/technical-details/release-notes/4.tucker/4.3.7.md b/docs/technical-details/release-notes/4.tucker/4.3.7.md deleted file mode 100644 index 878ef822..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.7.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.3.7 - -4/16/2024 - -- Fixed transaction handling to stay open during long
compaction operations -- Fixed handling of sorting on non-indexed attributes -- Storage stability improvements -- Fixed authentication/authorization of WebSockets connections and use of cookies -- Fixes for clone node operations diff --git a/docs/technical-details/release-notes/4.tucker/4.3.8.md b/docs/technical-details/release-notes/4.tucker/4.3.8.md deleted file mode 100644 index 2f858e6f..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.8.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.3.8 - -4/26/2024 - -- Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within the keep-alive window) -- Improved handling of write queue timeouts, with configurability -- Fixed a memory leak that can occur with NATS reconnections after heartbeat misses -- Fixed a bug in clone node with a null port -- Add error events to MQTT events system diff --git a/docs/technical-details/release-notes/4.tucker/4.3.9.md b/docs/technical-details/release-notes/4.tucker/4.3.9.md deleted file mode 100644 index b693c746..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.3.9.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.3.9 - -4/30/2024 - -- lmdb-js upgrade diff --git a/docs/technical-details/release-notes/4.tucker/4.4.0.md b/docs/technical-details/release-notes/4.tucker/4.4.0.md deleted file mode 100644 index 3aa11dc3..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.0.md +++ /dev/null @@ -1,55 +0,0 @@ -# 4.4.0 - -#### HarperDB 4.4.0 - -10/14/2024 - -### Native Replication - -HarperDB has a completely [new native replication system](../../../developers/replication/) which is faster, more efficient, more secure, and more reliable than the previous replication system. The new system (codenamed "Plexus") uses direct WebSocket connections between servers with highly optimized encoding, and is driven by directly tracking the audit/transaction log for efficient and flexible data transfer. This replication has improved resilience, with the ability to reach consensus consistency through cross-node catch-up when one node goes down. Network connections can be performed over the existing operations API port or a separate port, for improved configurability. - -The native replication system is much easier to configure, with multiple options for authentication and security, including PKI/mTLS security that is highly robust and easy to use in conjunction with existing PKI certificates. Replication can be configured through explicit subscriptions or for automated replication of all data in a database. With automated replication, gossiping is used to automatically discover and connect to other nodes in the cluster. - -#### Sharding - -The new replication system also includes provisional support for [sharding](../../../developers/replication/sharding.md). This sharding mechanism paves the way for greater scalability and performance, by allowing data to be distributed across multiple nodes. - -#### Replicated Operations - -Certain operations can now be replicated across the cluster, including the deployment and management of components. This allows for a more seamless experience when managing a cluster of HarperDB instances. Restarts can also be "replicated", and if used, will perform a rolling restart of all the nodes in a cluster.
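A minimal sketch of what invoking a replicated deployment might look like through the operations API. The payload shape, in particular the `replicated` flag, is an assumption for illustration rather than a confirmed parameter; the port, credentials, and package reference are placeholders.

```typescript
// Minimal sketch: deploy a component through the operations API so the
// operation propagates across the cluster. The `replicated` flag and
// payload shape are assumptions; port, credentials, and package reference
// are placeholders.
const response = await fetch('http://localhost:9925/', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: 'Basic ' + btoa('admin:password'), // placeholder
  },
  body: JSON.stringify({
    operation: 'deploy_component',
    project: 'my-app', // placeholder project name
    package: 'https://github.com/example/my-app', // placeholder package
    replicated: true, // assumed flag to replicate the deploy cluster-wide
  }),
});
console.log(await response.json());
```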
- -### Computed Properties - -Computed properties allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value. For example, you could have a computed property for a full name based on first and last, or age/duration based on a date. Computed properties are also foundational for custom indexes. See the [schema documentation](/docs/4.4/developers/applications/defining-schemas), [Resource API](/docs/4.4/technical-details/reference/resource), and our blog post on [computed properties](https://www.harperdb.io/development/tutorials/how-to-create-custom-indexes-with-computed-properties) for more information. - -### Custom Indexing - -Custom indexes can now be defined using computed properties to allow for unlimited possibilities of indexing, including composite indexing, full-text indexing, and vector indexing. Again, see the [schema documentation](/docs/4.4/developers/applications/defining-schemas) for more information. - -### Native Graph Support - -HarperDB now includes provisional support for native [GraphQL querying functionality](/docs/4.4/technical-details/reference/graphql). This allows for querying of graph data using GraphQL syntax. This is provisional and some APIs may be updated in the future. - -### Dynamic Certificate Management - -Certificates are now stored in system tables and can be dynamically managed. Certificates can be added, replaced, and deleted without restarting HarperDB. This includes both standard certificates and certificate authorities, as well as private keys (private keys are not stored in a table; they are securely stored in a file). - -#### Status Report on Startup - -On startup, HarperDB will now print out an informative status of all running services and the ports they are listening on. - -#### Support for Response object - -Resource methods can now return a `Response` object (or an object with `headers` and `status`) to allow for more control over the response (see the sketch at the end of these notes). - -### Auto-incrementing Primary Keys - -Primary keys can now be auto-incrementing, allowing for automatic generation of numeric primary keys on insert/creation. Primary keys defined with `ID` or `String` will continue to use GUIDs for auto-assigned primary keys, which occurs on insert or creation if the primary key is not provided. However, for keys that are defined as `Any`, `Int`, or `Long`, the primary key will be assigned using auto-incrementation. This is significantly more efficient than GUIDs since the key only requires 8 bytes of storage instead of 31 bytes, and doesn't require random number generation. - -#### Developer/Production Mode for Configuration - -When using interactive installation (when configuration is not provided through arguments or env vars), HarperDB now provides an option for developer or production mode, with a set of default configuration values for each mode better suited to development or production environments. - -**Export by Protocol** - -Exported resources can be configured to be specifically exported by protocol (REST, MQTT, etc.) for more granular control over what is exported where.
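As referenced in the Response object section above, here is a minimal TypeScript sketch of a resource method controlling its HTTP response. It assumes the `tables` export from the `harperdb` module and a hypothetical `Dog` table; the `body` property and the `get(target)` signature are assumptions for illustration.

```typescript
// Minimal sketch: a resource method returning an object with `status` and
// `headers` to control the response. `Dog` is a hypothetical table; the
// `body` property and `get(target)` signature are illustrative assumptions.
import { tables } from 'harperdb';

const { Dog } = tables;

export class DogWithHeaders extends Dog {
  async get(target: unknown) {
    const record = await super.get(target);
    return {
      status: record ? 200 : 404,
      headers: { 'X-Served-By': 'DogWithHeaders' }, // custom response header
      body: record,
    };
  }
}
```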
diff --git a/docs/technical-details/release-notes/4.tucker/4.4.1.md b/docs/technical-details/release-notes/4.tucker/4.4.1.md deleted file mode 100644 index 0963bc93..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.1.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.4.1 - -10/17/2024 - -- Fix issue where non-RSA keys were not being parsed correctly on startup. -- Fix a memory leak when cluster_network closes a hub connection -- Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic -- Record analytics and server-timing header even when cache resolution fails diff --git a/docs/technical-details/release-notes/4.tucker/4.4.10.md b/docs/technical-details/release-notes/4.tucker/4.4.10.md deleted file mode 100644 index 9767c6e7..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.10.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.4.10 - -12/17/2024 - -- Fix for deploying packages and detecting node_modules directory diff --git a/docs/technical-details/release-notes/4.tucker/4.4.11.md b/docs/technical-details/release-notes/4.tucker/4.4.11.md deleted file mode 100644 index 8eb248f9..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.11.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.4.11 - -12/18/2024 - -- Fix for initial certificate creation on upgrade -- Docker build fix diff --git a/docs/technical-details/release-notes/4.tucker/4.4.12.md b/docs/technical-details/release-notes/4.tucker/4.4.12.md deleted file mode 100644 index 1b1b4e31..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.12.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.4.12 - -12/19/2024 - -- Move components installed by reference into hdb/components for consistency and compatibility with next.js -- Use npm install --force to ensure modules are installed diff --git a/docs/technical-details/release-notes/4.tucker/4.4.13.md b/docs/technical-details/release-notes/4.tucker/4.4.13.md deleted file mode 100644 index 2e4427ea..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.13.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 4.4.13 - -1/2/2025 - -- Fix for not using requestCert if the port doesn't need replication -- Fix for applying timeouts to the HTTP server for ancient node versions -- Updates for different replication configuration settings, including sharding and replication using stored credentials -- Mitigate crashing due to GC'ed shared array buffers -- Fix for error handling with CLI failures -- Updated dependencies -- Fix to allow securePort to be set on authentication diff --git a/docs/technical-details/release-notes/4.tucker/4.4.14.md b/docs/technical-details/release-notes/4.tucker/4.4.14.md deleted file mode 100644 index 0c649e13..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.14.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.4.14 - -1/3/2025 - -- Fix for starting HTTP server if headersTimeout is omitted in the configuration -- Fix for avoiding ping timeouts for large/long-duration WS messages between nodes -- Don't report errors for a component that only uses a directory -- Add flag for disabling WebSocket on REST component diff --git a/docs/technical-details/release-notes/4.tucker/4.4.15.md b/docs/technical-details/release-notes/4.tucker/4.4.15.md deleted file mode 100644 index 8f9c0757..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.15.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.4.15 - -1/8/2025 - -- Fix for managing the state of
replication sequences for a node -- Fix for better concurrency with ongoing replication -- Fix for accessing audit log entries diff --git a/docs/technical-details/release-notes/4.tucker/4.4.16.md b/docs/technical-details/release-notes/4.tucker/4.4.16.md deleted file mode 100644 index 3becc679..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.16.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 4.4.16 - -1/22/2025 - -- Fix for cleaning up old audit entries and associated deletion entries -- Allow CLI operations to be run when cloning is enabled -- Report table size in describe operations -- Fix for cleaning up symlinks when dropping components -- Fix for enumerating components when symlinks are used -- Add an option for using a specific installation command with deploys -- Add an API for registering an HTTP upgrade listener with `server.upgrade` diff --git a/docs/technical-details/release-notes/4.tucker/4.4.17.md b/docs/technical-details/release-notes/4.tucker/4.4.17.md deleted file mode 100644 index dce496cf..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.17.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.4.17 - -1/29/2025 - -- Provide statistics on the size of the audit log store -- Fix handling of symlinks to the HarperDB package to avoid NPM errors in restricted containers -- Add option for rolling/consecutive restarts for deployments -- Fix for enabling root CAs for replication authorization diff --git a/docs/technical-details/release-notes/4.tucker/4.4.18.md b/docs/technical-details/release-notes/4.tucker/4.4.18.md deleted file mode 100644 index c2836edd..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.18.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.4.18 - -1/29/2025 - -- Add option for disabling full table copy in replication -- Add option for startTime in route configuration -- Add/fix option to deploy with package from CLI diff --git a/docs/technical-details/release-notes/4.tucker/4.4.19.md b/docs/technical-details/release-notes/4.tucker/4.4.19.md deleted file mode 100644 index ae882c74..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.19.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.4.19 - -2/4/2025 - -- LMDB upgrade for free-list verification on commit -- Add check to avoid compacting a database multiple times with compactOnStart -- Fix handling of denied/absent subscription -- Add support for including symlinked directories in packaging a deployed component diff --git a/docs/technical-details/release-notes/4.tucker/4.4.2.md b/docs/technical-details/release-notes/4.tucker/4.4.2.md deleted file mode 100644 index 9d013c46..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.2.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.4.2 - -10/18/2024 - -- Republish of 4.4.1 with Git merge correction.
diff --git a/docs/technical-details/release-notes/4.tucker/4.4.20.md b/docs/technical-details/release-notes/4.tucker/4.4.20.md deleted file mode 100644 index 62cb86b2..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.20.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.4.20 - -2/11/2025 - -- LMDB upgrade for improved handling of page boundaries with free-space lists diff --git a/docs/technical-details/release-notes/4.tucker/4.4.21.md b/docs/technical-details/release-notes/4.tucker/4.4.21.md deleted file mode 100644 index 74012d81..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.21.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.4.21 - -2/25/2025 - -- Fix for saving audit log entries for large keys (> 1KB) -- Security fix for handling missing passwords -- Skip bin links for NPM installation to avoid access issues diff --git a/docs/technical-details/release-notes/4.tucker/4.4.22.md b/docs/technical-details/release-notes/4.tucker/4.4.22.md deleted file mode 100644 index a12b4747..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.22.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.4.22 - -3/5/2025 - -- Add new http configuration option `corsAccessControlAllowHeaders` diff --git a/docs/technical-details/release-notes/4.tucker/4.4.23.md b/docs/technical-details/release-notes/4.tucker/4.4.23.md deleted file mode 100644 index 2fd31927..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.23.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.4.23 - -3/7/2025 - -- Fix for subscriptions to children of segmented id -- Fix for better error reporting on NPM failures diff --git a/docs/technical-details/release-notes/4.tucker/4.4.24.md b/docs/technical-details/release-notes/4.tucker/4.4.24.md deleted file mode 100644 index fddf569d..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.24.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.4.24 - -3/10/2025 - -- Use process.exit(0) to restart when enabled by env var -- Reset the cwd on thread restart diff --git a/docs/technical-details/release-notes/4.tucker/4.4.3.md b/docs/technical-details/release-notes/4.tucker/4.4.3.md deleted file mode 100644 index 91b221cc..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.3.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.4.3 - -10/25/2024 - -- Fix for notification of records through classes that override get for multi-tier caching -- Fix for CLI operations -- Support for longer route parameters in Fastify routes -- Fix for accessing `harperdb` package/module from user threads -- Improvements to clone node for cloning without credentials diff --git a/docs/technical-details/release-notes/4.tucker/4.4.4.md b/docs/technical-details/release-notes/4.tucker/4.4.4.md deleted file mode 100644 index 05cd5af8..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.4.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.4.4 - -11/4/2024 - -- Re-introduce declarative roles and permissions -- Fix for OpenAPI endpoint -- Fix for exports of `harperdb` package/module diff --git a/docs/technical-details/release-notes/4.tucker/4.4.5.md b/docs/technical-details/release-notes/4.tucker/4.4.5.md deleted file mode 100644 index 7652820c..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.5.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 4.4.5 - -11/15/2024 - -- Fix for DOS vulnerability in large headers with cache-control and replication headers -- Fix for handling a change in the schema type for sub-fields in a nested object -- Add support 
for content type handlers to return iterators -- Fix for session management with custom authentication handler -- Updates for Node.js V23 compatibility -- Fix for sorting on nested properties -- Fix for querying on not_equal to a null with object values diff --git a/docs/technical-details/release-notes/4.tucker/4.4.6.md b/docs/technical-details/release-notes/4.tucker/4.4.6.md deleted file mode 100644 index c7131f3b..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.6.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.4.6 - -11/25/2024 - -- Fix queries with only sorting applied -- Fix for handling invalidation events propagating through sources -- Expanded CLI support for deploying packages -- Support for deploying large packages diff --git a/docs/technical-details/release-notes/4.tucker/4.4.7.md b/docs/technical-details/release-notes/4.tucker/4.4.7.md deleted file mode 100644 index 39579988..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.7.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.4.7 - -11/27/2024 - -- Allow a package to deploy its own modules -- Fix for preventing double sourcing of resources diff --git a/docs/technical-details/release-notes/4.tucker/4.4.8.md b/docs/technical-details/release-notes/4.tucker/4.4.8.md deleted file mode 100644 index 67d6a4e9..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.8.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.4.8 - -12/2/2024 - -- Add multiple node versions of published docker containers diff --git a/docs/technical-details/release-notes/4.tucker/4.4.9.md b/docs/technical-details/release-notes/4.tucker/4.4.9.md deleted file mode 100644 index fffb30c7..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.4.9.md +++ /dev/null @@ -1,9 +0,0 @@ -### HarperDB 4.4.9 - -12/12/2024 - -- Change enableRootCAs to default to true -- Fixes for install and clone commands -- Add rejectUnauthorized to the CLI options -- Fixes for cloning -- Install modules in own component when deploying package by payload diff --git a/docs/technical-details/release-notes/4.tucker/4.5.0.md b/docs/technical-details/release-notes/4.tucker/4.5.0.md deleted file mode 100644 index 1e521c47..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.0.md +++ /dev/null @@ -1,94 +0,0 @@ -# 4.5.0 - -#### HarperDB 4.5.0 - -3/13/2025 - -### Blob Storage - -4.5 introduces a new [Blob storage system](/docs/4.5/technical-details/reference/blob), which is designed to efficiently handle large binary objects, with built-in support for streaming large content/media in and out of storage. This provides significantly better performance and functionality for large unstructured data, such as HTML, images, video, and other large files. Components can leverage this functionality through the JavaScript `Blob` interface, and the new `createBlob` function. Blobs are fully replicated and integrated. Harper can also coerce strings to `Blob`s (when dictated by the field type), making it feasible to use blobs for large string data, including with MQTT messaging. - -### Password Hashing Upgrade - -4.5 adds two new password hashing algorithms for better security (to replace md5): -`sha256`: This is a solid general-purpose password hashing algorithm, with good security properties and excellent performance. This is the default algorithm in 4.5. -`argon2id`: This provides the highest level of security, and is the recommended algorithm for environments that do not require frequent password verifications.
However, it is more CPU intensive, and may not be suitable for environments with a high frequency of password verifications. - -### Resource and Storage Analytics - -4.5 includes numerous new analytics for resources and storage, including page faults, context switches, free space, disk usage, and other metrics. - -#### Default Replication Port - -The default port for replication has been changed from 9925 to 9933. - -### Property Forwarding - -Record properties on resource instances are now accessible through standard property access syntax, regardless of whether the property was declared in a schema. Previously, only properties declared in a schema were accessible through standard property access syntax. This change allows for more consistent and intuitive access to record properties, regardless of how they were defined. It is still recommended to declare properties in a schema for better performance and documentation. - -### Storage Reclamation - -Harper now includes functionality for automatically trying to clean up and evict non-essential data when storage is running low. When free space drops below 40% (configurable), Harper will start to: - -- Evict older entries from caching tables -- Evict older audit log entries -- Remove older rotated log files - These efforts will become progressively more aggressive as free space decreases. - -### Expanded Sharding Functionality - -When sharding is being used, Harper can now honor write requests with residency information that will not be written to the local node's table. Harper also now allows nodes to be declaratively configured as part of a shard. - -### Certificate Revocation - -Certificates can now be revoked by configuring nodes with a list of revoked certificate serial numbers. - -### Built-in `loadEnv` Component - -There is a [new `loadEnv` component loader](/docs/4.5/developers/components/built-in) that can be used to load environmental variables from a .env file in a component. - -### Cluster Status Information - -The [`cluster_status` operation](../../../developers/operations-api/clustering.md) now includes new statistics for replication, including the timestamps of last received transactions, sent transactions, and committed transactions. - -### Improved URL path parsing - -Resources can be defined with nested paths and directly accessed by the exact path without requiring a trailing slash. The `id.property` syntax for accessing properties in URLs will only be applied to properties that are declared in a schema. This allows for URLs to generally include dots in paths without being interpreted as property access. A new [`directURLMapping` option/flag](../../../deployments/configuration.md) on resources allows for more direct URL path handling as well. - -### `server.authenticateUser` API - -In addition to the `server.getUser` API that allows for retrieval of users by username, the `server.authenticateUser` API is now available, which will _always_ verify the user by the provided password (see the sketch below). - -#### Improved Message Delivery - -Performance of message delivery has been improved. - -### HTTP/2 - -HarperDB now supports HTTP/2 for all API endpoints. This can be enabled with the `http2` option in the configuration file. - -### `harperdb` symlink - -Using `import from 'harperdb'` will more consistently work when directly running a component locally. - -### Transaction Reuse - -By default, transactions can now be reused after calling `transaction.commit()`.
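As referenced in the `server.authenticateUser` section above, here is a minimal TypeScript sketch of verifying credentials in a component. The exact signature (username plus password) is an assumption based on the description; treat it as illustrative rather than a definitive API reference.

```typescript
// Minimal sketch: verify credentials with server.authenticateUser.
// Unlike server.getUser, this always verifies the provided password.
// The (username, password) signature is an assumption for illustration.
import { server } from 'harperdb';

export async function checkCredentials(username: string, password: string): Promise<boolean> {
  try {
    const user = await server.authenticateUser(username, password);
    return user != null; // a user object is returned when verification succeeds
  } catch {
    return false; // verification failed
  }
}
```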
- -### GraphQL configuration - -The GraphQL query endpoint can be configured to listen on different ports. The GraphQL query endpoint is now also disabled by default, to avoid any conflicts. - -### Glob support for components - -Glob file handling for specifying files used by components has been improved for better consistency. - -### Table.getRecordCount - -`Table.getRecordCount()` is now available to get the number of records in a table. - -### Removal of record counts from REST API - -Previously the root path for a resource in the REST API would return a record count. However, this is a significant performance hazard and was never documented to exist, so this has been removed to ensure better performance and reliability. - -Note that downgrading from 4.5 to 4.4 is _not_ supported. diff --git a/docs/technical-details/release-notes/4.tucker/4.5.1.md b/docs/technical-details/release-notes/4.tucker/4.5.1.md deleted file mode 100644 index 96743caa..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.1.md +++ /dev/null @@ -1,11 +0,0 @@ -### HarperDB 4.5.1 - -3/18/2025 - -- Fix/implementation for sharding data that is written for cache resolution -- Add support for replication.shard in configuration for defining the local node's shard id -- Fix for source map handling in stack traces -- Improved error reporting for syntax errors in component code -- Improved logging on deployment and NPM installation -- Added shard information to cluster_status -- Fix for audit entry eviction when a table is deleted diff --git a/docs/technical-details/release-notes/4.tucker/4.5.10.md b/docs/technical-details/release-notes/4.tucker/4.5.10.md deleted file mode 100644 index 5e360f6e..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.10.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.5.10 - -5/20/2025 - -- Expose the `resources` map for being able to set and access custom resources -- Fix for cleaning up blob files that are used when a database is deleted diff --git a/docs/technical-details/release-notes/4.tucker/4.5.11.md b/docs/technical-details/release-notes/4.tucker/4.5.11.md deleted file mode 100644 index 3adef455..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.11.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.5.11 -6/27/2025 - -* Fix bug (working around a Node.js bug) with assigning ciphers to a server and applying them to TLS connections -* Fix for handling TLS array when checking certificates configuration \ No newline at end of file diff --git a/docs/technical-details/release-notes/4.tucker/4.5.12.md b/docs/technical-details/release-notes/4.tucker/4.5.12.md deleted file mode 100644 index d04c8fbe..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.12.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.5.12 -7/9/2025 - -- Fix for dynamically setting `harperdb` package symlink on deploy -- Assign shard numbers from each node's config rather than from routes -- Handle certificates without a common name, falling back to the SANs -- Properly clean up blobs that are only transiently used for replication -- Ensure that we always set up server.shards even when there are no TLS connections diff --git a/docs/technical-details/release-notes/4.tucker/4.5.13.md b/docs/technical-details/release-notes/4.tucker/4.5.13.md deleted file mode 100644 index f38d0ad6..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.13.md +++ /dev/null @@ -1,4 +0,0 @@ -### HarperDB 4.5.13 -7/12/2025 - -- Fix cleaning out audit entries when a blob has been removed diff --git
a/docs/technical-details/release-notes/4.tucker/4.5.14.md b/docs/technical-details/release-notes/4.tucker/4.5.14.md deleted file mode 100644 index c71fe2b1..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.14.md +++ /dev/null @@ -1,4 +0,0 @@ -### HarperDB 4.5.14 -7/15/2025 - -- Use proper back-pressure when copying a table for initial database sync diff --git a/docs/technical-details/release-notes/4.tucker/4.5.2.md b/docs/technical-details/release-notes/4.tucker/4.5.2.md deleted file mode 100644 index 624416e8..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.2.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.5.2 - -3/25/2025 - -- For defined schemas, don't allow updates from remote nodes that could cause conflicts and repeated schema change requests -- New harper-chrome docker container for accessing Chrome binaries for use with tools like Puppeteer -- Improved rolling restart handling of errors when reaching individual nodes -- Defined cleaner operation object to avoid accidental leaking of credentials in logging diff --git a/docs/technical-details/release-notes/4.tucker/4.5.3.md b/docs/technical-details/release-notes/4.tucker/4.5.3.md deleted file mode 100644 index 04f5d25e..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.3.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.5.3 - -4/3/2025 - -- Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key -- Fix for analytics of storage size when tables are deleted diff --git a/docs/technical-details/release-notes/4.tucker/4.5.4.md b/docs/technical-details/release-notes/4.tucker/4.5.4.md deleted file mode 100644 index 0029dd1e..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.4.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.5.4 - -4/11/2025 - -- Fix for replication of (non-retained) published messages -- Make the cookie domain configurable to allow for cookies shared across sub-hostnames -- Fix for on-demand loading of shared blobs diff --git a/docs/technical-details/release-notes/4.tucker/4.5.5.md b/docs/technical-details/release-notes/4.tucker/4.5.5.md deleted file mode 100644 index 3b93b046..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.5.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.5.5 - -4/15/2025 - -- Updates for better messaging with symlinks in Windows -- Fix for saving replicated blobs diff --git a/docs/technical-details/release-notes/4.tucker/4.5.6.md b/docs/technical-details/release-notes/4.tucker/4.5.6.md deleted file mode 100644 index 0f26c1de..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.6.md +++ /dev/null @@ -1,7 +0,0 @@ -### HarperDB 4.5.6 - -4/17/2025 - -- Fix for changing the type of the primary key attribute -- Added a new `includeExpensiveRecordCountEstimates` property to the REST component for returning record count estimates -- Fix for dropping attributes diff --git a/docs/technical-details/release-notes/4.tucker/4.5.7.md b/docs/technical-details/release-notes/4.tucker/4.5.7.md deleted file mode 100644 index f4481712..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.7.md +++ /dev/null @@ -1,6 +0,0 @@ -### HarperDB 4.5.7 - -4/23/2025 - -- Fix for handling buffers from replicated sharded blob records to prevent overwriting while in use -- Updated the included studio version with a fix for logging in diff --git a/docs/technical-details/release-notes/4.tucker/4.5.8.md b/docs/technical-details/release-notes/4.tucker/4.5.8.md deleted file
mode 100644 index 80482d46..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.8.md +++ /dev/null @@ -1,8 +0,0 @@ -### HarperDB 4.5.8 - -4/30/2025 - -- Fix MQTT subscription topics with trailing slashes to ensure they are not treated as a wildcard -- Fix the arguments that are used for the default connect/subscribe calls so they pass the second argument from connect like `connect(incomingMessages, query) -> subscribe(query)` -- Add support for replication connections using any configured certificate authorities to verify the server certificates -- Added more descriptive error messages for errors in user residency functions diff --git a/docs/technical-details/release-notes/4.tucker/4.5.9.md b/docs/technical-details/release-notes/4.tucker/4.5.9.md deleted file mode 100644 index c7c972ed..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.5.9.md +++ /dev/null @@ -1,5 +0,0 @@ -### HarperDB 4.5.9 - -5/14/2025 - -- Remove the --no-bin-links directive for NPM that was causing installs of dependencies to fail diff --git a/docs/technical-details/release-notes/4.tucker/4.6.0.md b/docs/technical-details/release-notes/4.tucker/4.6.0.md deleted file mode 100644 index 426c8aa4..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.6.0.md +++ /dev/null @@ -1,31 +0,0 @@ -# 4.6.0 - -#### HarperDB 4.6.0 - -6/13/2025 - -### Vector Indexing: Hierarchical Navigable Small World - -Harper 4.6 now includes support for vector indexing, which allows for fast, efficient queries on large semantic data sets. Vector indexing is powered by the [Hierarchical Navigable Small World (HNSW) algorithm](https://arxiv.org/abs/1603.09320) and can be used to index any vector-valued property; it is particularly useful for vector text-embedding data, providing efficient vector-based searching for semantic and AI-based querying. HNSW is a preferred algorithm for vector indexing and searching because it provides an excellent balance of recall and performance. - -### New Extension API with support for dynamic reloading - -4.6 introduces a new extension API with significant ergonomic improvements for creating new extension components that are more robust and dynamic. The new API also provides a mechanism for dynamic reloading of some files and configuration without restarts. - -### Logging Improvements - -4.6 includes significant expansions to logging configurability, allowing for specific logging configurations of individual components. This also leverages the new extension API to allow for dynamic reloading of logging configuration. With the more granular logging, logs can be directed to different files and/or different log levels. -The logger includes support for HTTP logging, with configurability for logging standard HTTP methods and paths as well as headers, ids, and timing information. It also supports distinct logging configuration for different components. -The new logger is now based on the Node.js Console API, with improved formatting of log messages for various types of objects. -An important change is that logging to standard out/error will _not_ include the timestamp, and console logging does not get logged to the log files by default. - - -### Data Loader -4.6 includes a new [data loader](../../../developers/applications/data-loader.md) that can be used to load data into HarperDB as part of a component.
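As a rough sketch of how such a component might be laid out (the file names and field names below are illustrative assumptions, not taken from these notes), a component could ship a JSON seed file that the data loader applies on deploy:

```javascript
// Hypothetical component layout (names are illustrative):
//   config.yaml    -> registers the data loader and the files it reads
//   data/dogs.json -> records the loader ensures exist in the target table
//
// Contents of data/dogs.json, expressed here as the equivalent JavaScript value:
const seedData = {
	database: 'dev',
	table: 'Dog',
	records: [
		{ id: 1, name: 'Tucker', breed: 'Labrador Husky mix' },
		{ id: 2, name: 'Penny', breed: 'Whippet' },
	],
};
```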
The data loader can be used to load data from JSON files and can be deployed and distributed with a component to provide a reliable mechanism for ensuring specific records are loaded into Harper. - -### Resource API Upgrades - -4.6 includes an upgraded, opt-in form of the Resource API with significant improvements in ease of use. - -### only-if-cached behavior -Previously, when the `only-if-cached` caching directive was used and the entry was not cached, Harper would return a 504 but still make a request to origin in the background. Now, Harper will no longer make a request to origin for `only-if-cached`. \ No newline at end of file diff --git a/docs/technical-details/release-notes/4.tucker/4.6.1.md b/docs/technical-details/release-notes/4.tucker/4.6.1.md deleted file mode 100644 index 1de73ac0..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.6.1.md +++ /dev/null @@ -1,13 +0,0 @@ -# 4.6.1 -7/10/2025 - -- Plugin API updates to use plugin nomenclature -- Fix for dynamically setting `harperdb` package symlink on deploy -- Assign shard numbers from each node's config rather than from routes -- Handle certificates without a common name, falling back to the SANs -- Properly clean up blobs that are only transiently used for replication -- Ensure that we always set up `server.shards` even when there are no TLS connections -- Fix for clone node getting the cluster status -- Properly initialize config on CLI operations to avoid a path error -- Fix for compiling lmdb for macOS and using little-endian -- Allow secure cookies with localhost diff --git a/docs/technical-details/release-notes/4.tucker/4.6.2.md b/docs/technical-details/release-notes/4.tucker/4.6.2.md deleted file mode 100644 index a5b0afeb..00000000 --- a/docs/technical-details/release-notes/4.tucker/4.6.2.md +++ /dev/null @@ -1,6 +0,0 @@ -# 4.6.2 -7/15/2025 - -- Use proper back-pressure when copying a table for initial database sync -- Fix cleaning out audit entries when a blob has been removed -- Fix for running CLI operations when HarperDB is not installed \ No newline at end of file diff --git a/docs/technical-details/release-notes/4.tucker/README.md b/docs/technical-details/release-notes/4.tucker/README.md deleted file mode 100644 index 93a78ad3..00000000 --- a/docs/technical-details/release-notes/4.tucker/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Harper Tucker (Version 4) - -HarperDB version 4 ([Tucker release](tucker.md)) represents a major step forward in database technology. This release line has ground-breaking architectural advancements including: - -## [4.6](4.6.0.md) - -- Vector Indexing - 4.6 introduces a new Vector Indexing system based on Hierarchical Navigable Small World Graphs. -- New extension API - 4.6 introduces a new extension API for creating extension components. -- Improved logging configurability - Logging can be dynamically updated and specifically configured for each component. -- Resource API - 4.6 has updated Resource APIs for ease of use. -- Data loader - 4.6 introduces a new data loader that allows for ensuring records exist as part of a component. - -## [4.5](4.5.0.md) - -- Blob Storage - 4.5 introduces a new [Blob storage system](/docs/4.5/technical-details/reference/blob). -- Password Hashing Upgrade - two new password hashing algorithms for better security (to replace md5).
-- New resource and storage Analytics - -## [4.4](4.4.0.md) - -- Native replication (codename "Plexus"), which is faster, more efficient, secure, and reliable than the previous replication system and provides provisional sharding capabilities with a foundation for the future -- Computed properties that allow applications to define properties derived from other data stored in records, enabling composite properties without requiring actual storage of the computed value -- Custom indexing including composite, full-text indexing, and vector indexing - -## [4.3](4.3.0.md) - -- Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations - -- More advanced transaction support for CRDTs and storage of large integers (with BigInt) -- Better management with a new upgraded local studio and new CLI features - -## [4.2](4.2.0.md) - -- New component architecture and Resource API for advanced, robust custom database application development -- Real-time capabilities through MQTT, WebSockets, and Server-Sent Events -- REST interface for intuitive, fast, and standards-compliant HTTP interaction -- Native caching capabilities for high-performance cache scenarios -- Clone node functionality - -## [4.1](4.1.0.md) - -- New streaming iterators mechanism that allows query results to be delivered to clients _while_ query results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery -- New thread-based concurrency model for more efficient resource usage - -## [4.0](4.0.0.md) - -- New clustering technology that delivers robust, resilient, and high-performance replication -- Major storage improvements with a highly-efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities - -Did you know our release names are dedicated to employee pups? For our fourth release, [meet Tucker!](tucker.md) diff --git a/docs/technical-details/release-notes/4.tucker/tucker.md b/docs/technical-details/release-notes/4.tucker/tucker.md deleted file mode 100644 index edf8aeaf..00000000 --- a/docs/technical-details/release-notes/4.tucker/tucker.md +++ /dev/null @@ -1,7 +0,0 @@ -# Harper Tucker (Version 4) - -Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. - -![picture of grey and white dog](../../../../images/dogs/tucker.png) - -_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at Harper. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard.
I have very ticklish feet and love belly rubs!_ diff --git a/docs/technical-details/release-notes/README.md b/docs/technical-details/release-notes/README.md deleted file mode 100644 index 183204d4..00000000 --- a/docs/technical-details/release-notes/README.md +++ /dev/null @@ -1,271 +0,0 @@ -# Release Notes - -### Current Release - -[Meet Tucker](4.tucker/tucker.md) Our 4th Release Pup - -[4.6.3 Tucker](4.tucker/4.6.3.md) - -[4.6.2 Tucker](4.tucker/4.6.2.md) - -[4.6.1 Tucker](4.tucker/4.6.1.md) - -[4.6.0 Tucker](4.tucker/4.6.0.md) - -[4.5.16 Tucker](4.tucker/4.5.16.md) - -[4.5.15 Tucker](4.tucker/4.5.15.md) - -[4.5.14 Tucker](4.tucker/4.5.14.md) - -[4.5.13 Tucker](4.tucker/4.5.13.md) - -[4.5.12 Tucker](4.tucker/4.5.12.md) - -[4.5.11 Tucker](4.tucker/4.5.11.md) - -[4.5.10 Tucker](4.tucker/4.5.10.md) - -[4.5.9 Tucker](4.tucker/4.5.9.md) - -[4.5.8 Tucker](4.tucker/4.5.8.md) - -[4.5.7 Tucker](4.tucker/4.5.7.md) - -[4.5.6 Tucker](4.tucker/4.5.6.md) - -[4.5.5 Tucker](4.tucker/4.5.5.md) - -[4.5.4 Tucker](4.tucker/4.5.4.md) - -[4.5.3 Tucker](4.tucker/4.5.3.md) - -[4.5.2 Tucker](4.tucker/4.5.2.md) - -[4.5.1 Tucker](4.tucker/4.5.1.md) - -[4.5.0 Tucker](4.tucker/4.5.0.md) - -[4.4.24 Tucker](4.tucker/4.4.24.md) - -[4.4.23 Tucker](4.tucker/4.4.23.md) - -[4.4.22 Tucker](4.tucker/4.4.22.md) - -[4.4.21 Tucker](4.tucker/4.4.21.md) - -[4.4.20 Tucker](4.tucker/4.4.20.md) - -[4.4.19 Tucker](4.tucker/4.4.19.md) - -[4.4.18 Tucker](4.tucker/4.4.18.md) - -[4.4.17 Tucker](4.tucker/4.4.17.md) - -[4.4.16 Tucker](4.tucker/4.4.16.md) - -[4.4.15 Tucker](4.tucker/4.4.15.md) - -[4.4.14 Tucker](4.tucker/4.4.14.md) - -[4.4.13 Tucker](4.tucker/4.4.13.md) - -[4.4.12 Tucker](4.tucker/4.4.12.md) - -[4.4.11 Tucker](4.tucker/4.4.11.md) - -[4.4.10 Tucker](4.tucker/4.4.10.md) - -[4.4.9 Tucker](4.tucker/4.4.9.md) - -[4.4.8 Tucker](4.tucker/4.4.8.md) - -[4.4.7 Tucker](4.tucker/4.4.7.md) - -[4.4.6 Tucker](4.tucker/4.4.6.md) - -[4.4.5 Tucker](4.tucker/4.4.5.md) - -[4.4.4 Tucker](4.tucker/4.4.4.md) - -[4.4.3 Tucker](4.tucker/4.4.3.md) - -[4.4.2 Tucker](4.tucker/4.4.2.md) - -[4.4.1 Tucker](4.tucker/4.4.1.md) - -[4.4.0 Tucker](4.tucker/4.4.0.md) - -[4.3.38 Tucker](4.tucker/4.3.38.md) - -[4.3.37 Tucker](4.tucker/4.3.37.md) - -[4.3.36 Tucker](4.tucker/4.3.36.md) - -[4.3.35 Tucker](4.tucker/4.3.35.md) - -[4.3.34 Tucker](4.tucker/4.3.34.md) - -[4.3.33 Tucker](4.tucker/4.3.33.md) - -[4.3.32 Tucker](4.tucker/4.3.32.md) - -[4.3.31 Tucker](4.tucker/4.3.31.md) - -[4.3.30 Tucker](4.tucker/4.3.30.md) - -[4.3.29 Tucker](4.tucker/4.3.29.md) - -[4.3.28 Tucker](4.tucker/4.3.28.md) - -[4.3.27 Tucker](4.tucker/4.3.27.md) - -[4.3.26 Tucker](4.tucker/4.3.26.md) - -[4.3.25 Tucker](4.tucker/4.3.25.md) - -[4.3.24 Tucker](4.tucker/4.3.24.md) - -[4.3.23 Tucker](4.tucker/4.3.23.md) - -[4.3.22 Tucker](4.tucker/4.3.22.md) - -[4.3.21 Tucker](4.tucker/4.3.21.md) - -[4.3.20 Tucker](4.tucker/4.3.20.md) - -[4.3.19 Tucker](4.tucker/4.3.19.md) - -[4.3.18 Tucker](4.tucker/4.3.18.md) - -[4.3.17 Tucker](4.tucker/4.3.17.md) - -[4.3.16 Tucker](4.tucker/4.3.16.md) - -[4.3.15 Tucker](4.tucker/4.3.15.md) - -[4.3.14 Tucker](4.tucker/4.3.14.md) - -[4.3.13 Tucker](4.tucker/4.3.13.md) - -[4.3.12 Tucker](4.tucker/4.3.12.md) - -[4.3.11 Tucker](4.tucker/4.3.11.md) - -[4.3.10 Tucker](4.tucker/4.3.10.md) - -[4.3.9 Tucker](4.tucker/4.3.9.md) - -[4.3.8 Tucker](4.tucker/4.3.8.md) - -[4.3.7 Tucker](4.tucker/4.3.7.md) - -[4.3.6 Tucker](4.tucker/4.3.6.md) - -[4.3.5
Tucker](4.tucker/4.3.5.md) - -[4.3.4 Tucker](4.tucker/4.3.4.md) - -[4.3.3 Tucker](4.tucker/4.3.3.md) - -[4.3.2 Tucker](4.tucker/4.3.2.md) - -[4.3.1 Tucker](4.tucker/4.3.1.md) - -[4.3.0 Tucker](4.tucker/4.3.0.md) - -[4.2.8 Tucker](4.tucker/4.2.8.md) - -[4.2.7 Tucker](4.tucker/4.2.7.md) - -[4.2.6 Tucker](4.tucker/4.2.6.md) - -[4.2.5 Tucker](4.tucker/4.2.5.md) - -[4.2.4 Tucker](4.tucker/4.2.4.md) - -[4.2.3 Tucker](4.tucker/4.2.3.md) - -[4.2.2 Tucker](4.tucker/4.2.2.md) - -[4.2.1 Tucker](4.tucker/4.2.1.md) - -[4.2.0 Tucker](4.tucker/4.2.0.md) - -[4.1.2 Tucker](4.tucker/4.1.2.md) - -[4.1.1 Tucker](4.tucker/4.1.1.md) - -[4.1.0 Tucker](4.tucker/4.1.0.md) - -[4.0.7 Tucker](4.tucker/4.0.7.md) - -[4.0.6 Tucker](4.tucker/4.0.6.md) - -[4.0.5 Tucker](4.tucker/4.0.5.md) - -[4.0.4 Tucker](4.tucker/4.0.4.md) - -[4.0.3 Tucker](4.tucker/4.0.3.md) - -[4.0.2 Tucker](4.tucker/4.0.2.md) - -[4.0.1 Tucker](4.tucker/4.0.1.md) - -[4.0.0 Tucker](4.tucker/4.0.0.md) - -### Past Releases - -[Meet Monkey](3.monkey/) Our 3rd Release Pup - -[3.2.1 Monkey](3.monkey/3.2.1.md) - -[3.2.0 Monkey](3.monkey/3.2.0.md) - -[3.1.5 Monkey](3.monkey/3.1.5.md) - -[3.1.4 Monkey](3.monkey/3.1.4.md) - -[3.1.3 Monkey](3.monkey/3.1.3.md) - -[3.1.2 Monkey](3.monkey/3.1.2.md) - -[3.1.1 Monkey](3.monkey/3.1.1.md) - -[3.1.0 Monkey](3.monkey/3.1.0.md) - -[3.0.0 Monkey](3.monkey/3.0.0.md) - ---- - -[Meet Penny](2.penny/) Our 2nd Release Pup - -[2.3.1 Penny](2.penny/2.3.1.md) - -[2.3.0 Penny](2.penny/2.3.0.md) - -[2.2.3 Penny](2.penny/2.2.3.md) - -[2.2.2 Penny](2.penny/2.2.2.md) - -[2.2.0 Penny](2.penny/2.2.0.md) - -[2.1.1 Penny](2.penny/2.1.1.md) - ---- - -[Meet Alby](1.alby/) Our 1st Release Pup - -[1.3.1 Alby](1.alby/1.3.1.md) - -[1.3.0 Alby](1.alby/1.3.0.md) - -[1.2.0 Alby](1.alby/1.2.0.md) - -[1.1.0 Alby](1.alby/1.1.0.md) diff --git a/docs/technical-details/release-notes/index.md b/docs/technical-details/release-notes/index.md new file mode 100644 index 00000000..fe3073c0 --- /dev/null +++ b/docs/technical-details/release-notes/index.md @@ -0,0 +1,269 @@ +--- +title: Release Notes +--- + +# Release Notes + +### Current Release + +[Meet Tucker](v4-tucker/tucker) Our 4th Release Pup + +[4.6.2 Tucker](v4-tucker/4.6.2) + +[4.6.1 Tucker](v4-tucker/4.6.1) + +[4.6.0 Tucker](v4-tucker/4.6.0) + +[4.5.14 Tucker](v4-tucker/4.5.14) + +[4.5.13 Tucker](v4-tucker/4.5.13) + +[4.5.12 Tucker](v4-tucker/4.5.12) + +[4.5.11 Tucker](v4-tucker/4.5.11) + +[4.5.10 Tucker](v4-tucker/4.5.10) + +[4.5.9 Tucker](v4-tucker/4.5.9) + +[4.5.8 Tucker](v4-tucker/4.5.8) + +[4.5.7 Tucker](v4-tucker/4.5.7) + +[4.5.6 Tucker](v4-tucker/4.5.6) + +[4.5.5 Tucker](v4-tucker/4.5.5) + +[4.5.4 Tucker](v4-tucker/4.5.4) + +[4.5.3 Tucker](v4-tucker/4.5.3) + +[4.5.2 Tucker](v4-tucker/4.5.2) + +[4.5.1 Tucker](v4-tucker/4.5.1) + +[4.5.0 Tucker](v4-tucker/4.5.0) + +[4.4.24 Tucker](v4-tucker/4.4.24) + +[4.4.23 Tucker](v4-tucker/4.4.23) + +[4.4.22 Tucker](v4-tucker/4.4.22) + +[4.4.21 Tucker](v4-tucker/4.4.21) + +[4.4.20 Tucker](v4-tucker/4.4.20) + +[4.4.19 Tucker](v4-tucker/4.4.19) + +[4.4.18 Tucker](v4-tucker/4.4.18) + +[4.4.17 Tucker](v4-tucker/4.4.17) + +[4.4.16 Tucker](v4-tucker/4.4.16) + +[4.4.15 Tucker](v4-tucker/4.4.15) + +[4.4.14 Tucker](v4-tucker/4.4.14) + +[4.4.13 Tucker](v4-tucker/4.4.13) + +[4.4.12 Tucker](v4-tucker/4.4.12) + +[4.4.11 Tucker](v4-tucker/4.4.11) + +[4.4.10 Tucker](v4-tucker/4.4.10) + +[4.4.9 Tucker](v4-tucker/4.4.9) + +[4.4.8 Tucker](v4-tucker/4.4.8) + +[4.4.7
Tucker](v4-tucker/4.4.7) + +[4.4.6 Tucker](v4-tucker/4.4.6) + +[4.4.5 Tucker](v4-tucker/4.4.5) + +[4.4.4 Tucker](v4-tucker/4.4.4) + +[4.4.3 Tucker](v4-tucker/4.4.3) + +[4.4.2 Tucker](v4-tucker/4.4.2) + +[4.4.1 Tucker](v4-tucker/4.4.1) + +[4.4.0 Tucker](v4-tucker/4.4.0) + +[4.3.38 Tucker](v4-tucker/4.3.38) + +[4.3.37 Tucker](v4-tucker/4.3.37) + +[4.3.36 Tucker](v4-tucker/4.3.36) + +[4.3.35 Tucker](v4-tucker/4.3.35) + +[4.3.34 Tucker](v4-tucker/4.3.34) + +[4.3.33 Tucker](v4-tucker/4.3.33) + +[4.3.32 Tucker](v4-tucker/4.3.32) + +[4.3.31 Tucker](v4-tucker/4.3.31) + +[4.3.30 Tucker](v4-tucker/4.3.30) + +[4.3.29 Tucker](v4-tucker/4.3.29) + +[4.3.28 Tucker](v4-tucker/4.3.28) + +[4.3.27 Tucker](v4-tucker/4.3.27) + +[4.3.26 Tucker](v4-tucker/4.3.26) + +[4.3.25 Tucker](v4-tucker/4.3.25) + +[4.3.24 Tucker](v4-tucker/4.3.24) + +[4.3.23 Tucker](v4-tucker/4.3.23) + +[4.3.22 Tucker](v4-tucker/4.3.22) + +[4.3.21 Tucker](v4-tucker/4.3.21) + +[4.3.20 Tucker](v4-tucker/4.3.20) + +[4.3.19 Tucker](v4-tucker/4.3.19) + +[4.3.18 Tucker](v4-tucker/4.3.18) + +[4.3.17 Tucker](v4-tucker/4.3.17) + +[4.3.16 Tucker](v4-tucker/4.3.16) + +[4.3.15 Tucker](v4-tucker/4.3.15) + +[4.3.14 Tucker](v4-tucker/4.3.14) + +[4.3.13 Tucker](v4-tucker/4.3.13) + +[4.3.12 Tucker](v4-tucker/4.3.12) + +[4.3.11 Tucker](v4-tucker/4.3.11) + +[4.3.10 Tucker](v4-tucker/4.3.10) + +[4.3.9 Tucker](v4-tucker/4.3.9) + +[4.3.8 Tucker](v4-tucker/4.3.8) + +[4.3.7 Tucker](v4-tucker/4.3.7) + +[4.3.6 Tucker](v4-tucker/4.3.6) + +[4.3.5 Tucker](v4-tucker/4.3.5) + +[4.3.4 Tucker](v4-tucker/4.3.4) + +[4.3.3 Tucker](v4-tucker/4.3.3) + +[4.3.2 Tucker](v4-tucker/4.3.2) + +[4.3.1 Tucker](v4-tucker/4.3.1) + +[4.3.0 Tucker](v4-tucker/4.3.0) + +[4.2.8 Tucker](v4-tucker/4.2.8) + +[4.2.7 Tucker](v4-tucker/4.2.7) + +[4.2.6 Tucker](v4-tucker/4.2.6) + +[4.2.5 Tucker](v4-tucker/4.2.5) + +[4.2.4 Tucker](v4-tucker/4.2.4) + +[4.2.3 Tucker](v4-tucker/4.2.3) + +[4.2.2 Tucker](v4-tucker/4.2.2) + +[4.2.1 Tucker](v4-tucker/4.2.1) + +[4.2.0 Tucker](v4-tucker/4.2.0) + +[4.1.2 Tucker](v4-tucker/4.1.2) + +[4.1.1 Tucker](v4-tucker/4.1.1) + +[4.1.0 Tucker](v4-tucker/4.1.0) + +[4.0.7 Tucker](v4-tucker/4.0.7) + +[4.0.6 Tucker](v4-tucker/4.0.6) + +[4.0.5 Tucker](v4-tucker/4.0.5) + +[4.0.4 Tucker](v4-tucker/4.0.4) + +[4.0.3 Tucker](v4-tucker/4.0.3) + +[4.0.2 Tucker](v4-tucker/4.0.2) + +[4.0.1 Tucker](v4-tucker/4.0.1) + +[4.0.0 Tucker](v4-tucker/4.0.0) + +### Past Releases + +[Meet Monkey](v3-monkey/) Our 3rd Release Pup + +[3.2.1 Monkey](v3-monkey/3.2.1) + +[3.2.0 Monkey](v3-monkey/3.2.0) + +[3.1.5 Monkey](v3-monkey/3.1.5) + +[3.1.4 Monkey](v3-monkey/3.1.4) + +[3.1.3 Monkey](v3-monkey/3.1.3) + +[3.1.2 Monkey](v3-monkey/3.1.2) + +[3.1.1 Monkey](v3-monkey/3.1.1) + +[3.1.0 Monkey](v3-monkey/3.1.0) + +[3.0.0 Monkey](v3-monkey/3.0.0) + +--- + +[Meet Penny](v2-penny/) Our 2nd Release Pup + +[2.3.1 Penny](v2-penny/2.3.1) + +[2.3.0 Penny](v2-penny/2.3.0) + +[2.2.3 Penny](v2-penny/2.2.3) + +[2.2.2 Penny](v2-penny/2.2.2) + +[2.2.0 Penny](v2-penny/2.2.0) + +[2.1.1 Penny](v2-penny/2.1.1) + +--- + +[Meet Alby](v1-alby/) Our 1st Release Pup + +[1.3.1 Alby](v1-alby/1.3.1) + +[1.3.0 Alby](v1-alby/1.3.0) + +[1.2.0 Alby](v1-alby/1.2.0) + +[1.1.0 Alby](v1-alby/1.1.0) diff --git a/docs/technical-details/release-notes/v1-alby/1.1.0.md b/docs/technical-details/release-notes/v1-alby/1.1.0.md new file mode 100644 index 00000000..2256a825 --- /dev/null +++ b/docs/technical-details/release-notes/v1-alby/1.1.0.md @@ -0,0 +1,72 @@ +--- +title: 1.1.0 +sidebar_position: 89899 +--- + +### HarperDB 1.1.0, Alby Release +
+4/18/2018 + +**Features** + +- Users & Roles: + - Limit/Assign access to all HarperDB operations + + - Limit/Assign access to schemas, tables & attributes + + - Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`) + +- Enhanced SQL parser + - Added extensive ANSI SQL Support. + - Added Array function, which allows for converting relational data into Object/Hierarchical data + - `Distinct_Array` Function: allows for removing duplicates in the Array function. + - Enhanced SQL Validation: Improved validation around the structure of SQL, validating the schema, etc. + - 10x performance improvement on SQL statements. + +- Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON. + +- Added upgrade function to CLI + +- Added ability to perform bulk update from CSV + +- Created landing page for HarperDB. + +- Added CORS support to HarperDB + +**Fixes** + +- Fixed memory leak in CSV bulk loads + +- Corrected error when attempting to perform a `SQL DELETE` + +- Added further validation to NoSQL `UPDATE` to validate the schema & table exist + +- Fixed an install issue where, if part of the install path did not exist, the install would silently fail. + +- Fixed issues with replicated data when one of the replicas is down + +- Removed logging of initial user’s credentials during install + +- Can now use reserved words as aliases in SQL + +- Removed user(s) password in results when calling `list_users` + +- Corrected forwarding of operations to other nodes in a cluster + +- Corrected lag in schema meta-data passing to other nodes in a cluster + +- Drop table & drop schema now move the table or schema to the trash folder under the Database folder for later permanent deletion. + +- Bulk inserts no longer halt the entire operation if some records already exist; instead, the return includes the hashes of records that were skipped. + +- Added ability to accept EULA from command line + +- Corrected `search_by_value` not searching on the correct attribute + +- Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js + +- Added error handling for errors resulting from SQL calculations. + +- Standardized error responses as JSON. + +- Corrected internal process generation to not allow more processes than the machine has cores. diff --git a/docs/technical-details/release-notes/v1-alby/1.2.0.md b/docs/technical-details/release-notes/v1-alby/1.2.0.md new file mode 100644 index 00000000..a504a7ad --- /dev/null +++ b/docs/technical-details/release-notes/v1-alby/1.2.0.md @@ -0,0 +1,42 @@ +--- +title: 1.2.0 +sidebar_position: 89799 +--- + +### HarperDB 1.2.0, Alby Release + +7/10/2018 + +**Features** + +- Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time. +- Geo: HarperDB has implemented turf.js into its SQL parser to enable geo-based analytics. +- Jobs: CSV data loads, Exports & Time to Live now all run as background jobs. +- Exports: Perform queries that export into JSON or CSV and save to disk or S3. + +**Fixes** + +- Fixed issue where CSV data loads incorrectly reported the number of records loaded. +- Added validation to stop `BETWEEN` operations in SQL. +- Updated logging to not include internal variables in the logs. +- Cleaned up `add_role` response to not include internal variables. +- Removed old and unused dependencies. +- Built out further unit tests and integration tests. +- Fixed HTTPS to handle certificates properly.
+- Improved stability of clustering & replication. +- Corrected issue where Objects and Arrays were not casting properly in the `SQL SELECT` response. +- Fixed issue where Blob text was not being returned from `SQL SELECT`s. +- Fixed error being returned when querying a table with no data; now correctly returns an empty array. +- Improved performance in SQL when searching on exact values. +- Fixed error when `./harperdb stop` is called. +- Fixed logging issue causing instability in the installer. +- Fixed `read_log` operation to accept date time. +- Added permissions checking to `export_to_s3`. +- Added ability to run SQL on `SELECT` without a `FROM`. +- Fixed issue where updating a user’s password was not being encrypted properly. +- Fixed `user_guide.html` to point to the readme in the git repo. +- Created option to have HarperDB run as a foreground process. +- Updated `user_info` to return the correct role for a user. +- Fixed issue where HarperDB would not stop if the database root was deleted. +- Corrected error message on insert if an invalid schema is provided. +- Added permissions checks for user & role operations. diff --git a/docs/technical-details/release-notes/v1-alby/1.3.0.md b/docs/technical-details/release-notes/v1-alby/1.3.0.md new file mode 100644 index 00000000..e3a5215f --- /dev/null +++ b/docs/technical-details/release-notes/v1-alby/1.3.0.md @@ -0,0 +1,27 @@ +--- +title: 1.3.0 +sidebar_position: 89699 +--- + +### HarperDB 1.3.0, Alby Release + +11/2/2018 + +**Features** + +- Upgrade: Upgrade to the newest version via command line. +- SQL Support: Added `IS NULL` for the SQL parser. +- Added attribute validation to search operations. + +**Fixes** + +- Fixed `SELECT` calculations, e.g. `SELECT 2+2`. +- Fixed `SELECT` with `OR` not returning expected results. +- No longer allowing reserved words for schema and table names. +- Corrected process interruptions from improper SQL statements. +- Improved message handling between spawned processes that replace killed processes. +- Enhanced error handling for updates to tables that do not exist. +- Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes. +- Fixed issue with new columns not being updated properly in update statements. +- Now validating roles, tables and attributes when creating or updating roles.
+- Fixed an issue where, in some cases, `undefined` was being returned after dropping a role diff --git a/docs/technical-details/release-notes/v1-alby/1.3.1.md b/docs/technical-details/release-notes/v1-alby/1.3.1.md new file mode 100644 index 00000000..56927389 --- /dev/null +++ b/docs/technical-details/release-notes/v1-alby/1.3.1.md @@ -0,0 +1,29 @@ +--- +title: 1.3.1 +sidebar_position: 89698 +--- + +### HarperDB 1.3.1, Alby Release + +2/26/2019 + +**Features** + +- Clustering connection direction appointment +- Foundations for threading/multi processing +- UUID autogen for hash attributes that were not provided +- Added cluster status operation + +**Bug Fixes and Enhancements** + +- More logging +- Clustering communication enhancements +- Clustering queue ordering by timestamps +- Cluster reconnection enhancements +- Number of system core(s) detection +- Node LTS (10.15) compatibility +- Update/Alter users enhancements +- General performance enhancements +- Warning is logged if different versions of HarperDB are connected via clustering +- Fixed need to restart after user creation/alteration +- Fixed SQL error that occurred on selecting from an empty table diff --git a/docs/technical-details/release-notes/v1-alby/index.md b/docs/technical-details/release-notes/v1-alby/index.md new file mode 100644 index 00000000..aad1de4d --- /dev/null +++ b/docs/technical-details/release-notes/v1-alby/index.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Alby (Version 1) +--- + +# HarperDB Alby (Version 1) + +Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup. + +Here is a bit about Alby: + +![picture of black dog](/dogs/alby.webp) + +_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sun bathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_ diff --git a/docs/technical-details/release-notes/v2-penny/2.1.1.md b/docs/technical-details/release-notes/v2-penny/2.1.1.md new file mode 100644 index 00000000..c59337d7 --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/2.1.1.md @@ -0,0 +1,28 @@ +--- +title: 2.1.1 +sidebar_position: 79898 +--- + +### HarperDB 2.1.1, Penny Release + +05/22/2020 + +**Highlights** + +- CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values. +- CORE-1023 Fixed minor bug in the final SQL step incorrectly trying to translate ordinals to aliases in `ORDER BY` statements. +- CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values. +- CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas. +- CORE-1005 Implemented the JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function. +- CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. The same rules now apply to hash attribute values. +- CORE-1003 Fixed handling of `ORDER BY` statements with function aliases. +- CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name. +- CORE-996 Fixed an issue where the `transact_to_cluster` flag was lost for CSV URL loads, and an issue where new attributes created in a CSV bulk load did not sync to the cluster. +- CORE-994 Added new operation `system_information`.
This operation returns info & metrics for the OS, time, memory, CPU, disk, and network. +- CORE-993 Added new custom date functions for AlaSQL & UTC updates. +- CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting the main HarperDB process. +- CORE-992 HTTPS enabled by default. +- CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage. +- CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime. +- CORE-975 Updated time values set by SQL Date Functions to be in epoch format. +- CORE-974 Added date functions to `SQL SELECT` column alias functionality. diff --git a/docs/technical-details/release-notes/v2-penny/2.2.0.md b/docs/technical-details/release-notes/v2-penny/2.2.0.md new file mode 100644 index 00000000..a669ca8b --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/2.2.0.md @@ -0,0 +1,44 @@ +--- +title: 2.2.0 +sidebar_position: 79799 +--- + +### HarperDB 2.2.0, Penny Release + +08/24/2020 + +**Features/Updates** + +- CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to use fewer resources +- CORE-1018 Adds SQL functionality for `BETWEEN` statements +- CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation +- CORE-1036 On create/drop table we auto create/drop the related transaction environments for the schema.table +- CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations +- CORE-1057 Implemented write transaction into lmdb create/update/delete functions +- CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards +- CORE-1059 Added config setting to disable transaction logging for an instance +- CORE-1076 Adds permissions filter to describe operations +- CORE-1043 Change clustering catchup to use the new transaction log +- CORE-1052 Removed word "master" from source +- CORE-1061 Added a new operation called `delete_transactions_before`, which will tail a transaction log for a specific schema/table +- CORE-1040 On HarperDB startup make sure all tables have a transaction environment +- CORE-1055 Added 2 new settings to change the server `headersTimeout` & `keepAliveTimeout` from the config file +- CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value` +- CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count +- CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL. +- CORE-1054 Updates permissions object to remove the delete attribute permission and update the table attribute permission key to `attribute_permissions` +- CORE-1092 Do not allow the `__createdtime__` to be updated +- CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate +- CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found +- CORE-1049 Updates response message for SQL update op with no matching rows +- CORE-1096 Added tracking of the origin in the transaction log.
This origin object stores the node name, the timestamp of the transaction from the originating node & the user. + +**Bug Fixes** + +- CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in the query +- CORE-1037 Fixed an issue where numbers with leading zeros, e.g. 00123, were converted to numbers rather than being honored as strings. +- CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo +- CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped. +- CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records. +- CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in the `WHERE` clause diff --git a/docs/technical-details/release-notes/v2-penny/2.2.2.md b/docs/technical-details/release-notes/v2-penny/2.2.2.md new file mode 100644 index 00000000..fca00967 --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/2.2.2.md @@ -0,0 +1,17 @@ +--- +title: 2.2.2 +sidebar_position: 79797 +--- + +### HarperDB 2.2.2, Penny Release + +10/27/2020 + +- CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled. +- CORE-1153 Fixed issue where `delete_files_before` was writing to the transaction log. +- CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created. +- CORE-1112 Adds handling for system timestamp attributes in permissions. +- CORE-1131 Adds better handling for checking permissions on operations with an action value in JSON. +- CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions. +- CORE-1135 Adds validation for valid keys in role API operations. +- CORE-1073 Adds new `import_from_s3` operation to API. diff --git a/docs/technical-details/release-notes/v2-penny/2.2.3.md b/docs/technical-details/release-notes/v2-penny/2.2.3.md new file mode 100644 index 00000000..06b89d4e --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/2.2.3.md @@ -0,0 +1,10 @@ +--- +title: 2.2.3 +sidebar_position: 79796 +--- + +### HarperDB 2.2.3, Penny Release + +11/16/2020 + +- CORE-1158 Performance improvements to the core delete function and configuration of `delete_files_before` to run in batches with a pause in between. diff --git a/docs/technical-details/release-notes/v2-penny/2.3.0.md b/docs/technical-details/release-notes/v2-penny/2.3.0.md new file mode 100644 index 00000000..a027eedb --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/2.3.0.md @@ -0,0 +1,23 @@ +--- +title: 2.3.0 +sidebar_position: 79699 +--- + +### HarperDB 2.3.0, Penny Release + +12/03/2020 + +**Features/Updates** + +- CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information) +- CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation +- CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings. +- CORE-1175 Added new internal LMDB function to copy an environment for use in future features. +- CORE-1166 Updated packages to address security vulnerabilities. + +**Bug Fixes** + +- CORE-1195 Modified `drop_attribute` to drop after the data cleanse completes. +- CORE-1149 Fix SQL bug regarding self joins and update alasql to the 0.6.5 release. +- CORE-1168 Fix inconsistent invalid schema/table errors.
+- CORE-1162 Fix bug where `delete_files_before` caused tables to grow in size due to an open cursor issue. diff --git a/docs/technical-details/release-notes/v2-penny/2.3.1.md b/docs/technical-details/release-notes/v2-penny/2.3.1.md new file mode 100644 index 00000000..03df0186 --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/2.3.1.md @@ -0,0 +1,13 @@ +--- +title: 2.3.1 +sidebar_position: 79698 +--- + +### HarperDB 2.3.1, Penny Release + +1/29/2021 + +**Bug Fixes** + +- CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`. +- CORE-1219 Increased maximum table attributes from 1000 to 10000 diff --git a/docs/technical-details/release-notes/v2-penny/index.md b/docs/technical-details/release-notes/v2-penny/index.md new file mode 100644 index 00000000..798f90e2 --- /dev/null +++ b/docs/technical-details/release-notes/v2-penny/index.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Penny (Version 2) +--- + +# HarperDB Penny (Version 2) + +Did you know our release names are dedicated to employee pups? For our second release, Penny was the star. + +Here is a bit about Penny: + +![picture of brindle dog](/dogs/penny.webp) + +_Hi, I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._ diff --git a/docs/technical-details/release-notes/v3-monkey/3.0.0.md b/docs/technical-details/release-notes/v3-monkey/3.0.0.md new file mode 100644 index 00000000..10319747 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.0.0.md @@ -0,0 +1,32 @@ +--- +title: 3.0.0 +sidebar_position: 69999 +--- + +### HarperDB 3.0, Monkey Release + +5/18/2021 + +**Features/Updates** + +- CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation. +- CORE-1304 Upgrade to Node 12.22.1. +- CORE-1235 Adds new upgrade/install functionality. +- CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance. +- CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`. +- CORE-1243 Change `HTTPS_ON` settings value to false by default. +- CORE-1189 Implement fastify web server, resulting in improved performance. +- CORE-1221 Update user API to use role name instead of role id. +- CORE-1225 Updated dependencies to eliminate npm security warnings. +- CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality. + +**Bug Fixes** + +- CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB. +- CORE-1288 Fix bug with drop table/schema that was causing an 'env required' error log.
+- CORE-1285 Update warning log when trying to create an attribute that already exists. +- CORE-1254 Added logic to manage data collisions in clustering. +- CORE-1212 Add pre-check to `drop_user` that returns an error if the user doesn't exist. +- CORE-1114 Update response code and message from `add_user` when the user already exists. +- CORE-1111 Update response from `create_attribute` to match the create schema/table response. +- CORE-1205 Fixed bug that prevented a schema/table from being dropped if its name was a number or had a wildcard value in it. Updated validation for insert, upsert and update. diff --git a/docs/technical-details/release-notes/v3-monkey/3.1.0.md b/docs/technical-details/release-notes/v3-monkey/3.1.0.md new file mode 100644 index 00000000..f14acb8e --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.1.0.md @@ -0,0 +1,24 @@ +--- +title: 3.1.0 +sidebar_position: 69899 +--- + +### HarperDB 3.1.0, Monkey Release + +8/24/2021 + +**Features/Updates** + +- CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions +- CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions +- CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management +- CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variables or command line arguments +- CORE-1310 Updated licensing functionality +- CORE-1301 Updated validation for performance improvement +- CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds +- CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES` + +**Bug Fixes** + +- CORE-1315 Corrected issue in HarperDB restart scenario +- CORE-1370 Update some of the validation error handlers so that they don't log the full stack diff --git a/docs/technical-details/release-notes/v3-monkey/3.1.1.md b/docs/technical-details/release-notes/v3-monkey/3.1.1.md new file mode 100644 index 00000000..8f90dc10 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.1.1.md @@ -0,0 +1,19 @@ +--- +title: 3.1.1 +sidebar_position: 69898 +--- + +### HarperDB 3.1.1, Monkey Release + +9/23/2021 + +**Features/Updates** + +- CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart +- CORE-1395 Create a setting which allows the local Studio to be served from an instance of HarperDB +- CORE-1397 Update the stock 404 response to not return the request URL +- General updates to optimize the Docker container + +**Bug Fixes** + +- CORE-1399 Added fixes for complex SQL alias issues diff --git a/docs/technical-details/release-notes/v3-monkey/3.1.2.md b/docs/technical-details/release-notes/v3-monkey/3.1.2.md new file mode 100644 index 00000000..706e5956 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.1.2.md @@ -0,0 +1,16 @@ +--- +title: 3.1.2 +sidebar_position: 69897 +--- + +### HarperDB 3.1.2, Monkey Release + +10/21/2021 + +**Features/Updates** + +- Updated the installation ASCII art to reflect the new HarperDB logo + +**Bug Fixes** + +- CORE-1408 Corrects issue where `drop_attribute` was
not properly setting the LMDB version number, causing tables to behave unexpectedly diff --git a/docs/technical-details/release-notes/v3-monkey/3.1.3.md b/docs/technical-details/release-notes/v3-monkey/3.1.3.md new file mode 100644 index 00000000..1a7d3301 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.1.3.md @@ -0,0 +1,12 @@ +--- +title: 3.1.3 +sidebar_position: 69896 +--- + +### HarperDB 3.1.3, Monkey Release + +1/14/2022 + +**Bug Fixes** + +- CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return diff --git a/docs/technical-details/release-notes/v3-monkey/3.1.4.md b/docs/technical-details/release-notes/v3-monkey/3.1.4.md new file mode 100644 index 00000000..3fa86ead --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.1.4.md @@ -0,0 +1,12 @@ +--- +title: 3.1.4 +sidebar_position: 69895 +--- + +### HarperDB 3.1.4, Monkey Release + +2/24/2022 + +**Features/Updates** + +- CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe. diff --git a/docs/technical-details/release-notes/v3-monkey/3.1.5.md b/docs/technical-details/release-notes/v3-monkey/3.1.5.md new file mode 100644 index 00000000..23661928 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.1.5.md @@ -0,0 +1,12 @@ +--- +title: 3.1.5 +sidebar_position: 69894 +--- + +### HarperDB 3.1.5, Monkey Release + +3/4/2022 + +**Features/Updates** + +- CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which tried to convert them to numbers but instead returned NaN. diff --git a/docs/technical-details/release-notes/v3-monkey/3.2.0.md b/docs/technical-details/release-notes/v3-monkey/3.2.0.md new file mode 100644 index 00000000..fa215082 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.2.0.md @@ -0,0 +1,14 @@ +--- +title: 3.2.0 +sidebar_position: 69799 +--- + +### HarperDB 3.2.0, Monkey Release + +3/25/2022 + +**Features/Updates** + +- CORE-1391 Bug fix related to orphaned HarperDB background processes. +- CORE-1509 Updated node version check, updated Node.js version, updated project dependencies. +- CORE-1518 Remove final call from logger. diff --git a/docs/technical-details/release-notes/v3-monkey/3.2.1.md b/docs/technical-details/release-notes/v3-monkey/3.2.1.md new file mode 100644 index 00000000..4cc983a4 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.2.1.md @@ -0,0 +1,12 @@ +--- +title: 3.2.1 +sidebar_position: 69798 +--- + +### HarperDB 3.2.1, Monkey Release + +6/1/2022 + +**Features/Updates** + +- CORE-1573 Added logic to track the pid of the foreground process if running in the foreground; on stop, that pid is used to kill the process. Logic was also added to kill the pm2 daemon when stop is called. diff --git a/docs/technical-details/release-notes/v3-monkey/3.3.0.md b/docs/technical-details/release-notes/v3-monkey/3.3.0.md new file mode 100644 index 00000000..236704dd --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/3.3.0.md @@ -0,0 +1,12 @@ +--- +title: 3.3.0 +sidebar_position: 69699 +--- + +### HarperDB 3.3.0 - Monkey + +- CORE-1595 Added new role type `structure_user`, which enables non-superusers to create/drop schemas/tables/attributes. +- CORE-1501 Improved performance for `drop_table`.
+- CORE-1599 Added two new operations for custom functions: `install_node_modules` & `audit_node_modules`. +- CORE-1598 Added `skip_node_modules` flag to the `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`. +- CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, and Python are no longer required when installing on these platforms. diff --git a/docs/technical-details/release-notes/v3-monkey/index.md b/docs/technical-details/release-notes/v3-monkey/index.md new file mode 100644 index 00000000..0b976639 --- /dev/null +++ b/docs/technical-details/release-notes/v3-monkey/index.md @@ -0,0 +1,11 @@ +--- +title: HarperDB Monkey (Version 3) +--- + +# HarperDB Monkey (Version 3) + +Did you know our release names are dedicated to employee pups? For our third release, we have Monkey. + +![picture of tan dog](/dogs/monkey.webp) + +_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to my home. I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_ diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.0.md b/docs/technical-details/release-notes/v4-tucker/4.0.0.md new file mode 100644 index 00000000..7a3b86bb --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.0.0.md @@ -0,0 +1,131 @@ +--- +title: 4.0.0 +sidebar_position: 59999 +--- + +### HarperDB 4.0.0, Tucker Release + +11/2/2022 + +**Networking & Data Replication (Clustering)** + +The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise-grade connective technology responsible for addressing, discovery, and exchange of messages that drive the common patterns in distributed systems. + +- CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them. +- CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` code as a dependency; on install of HarperDB, download `nats-server` if possible, else fall back to building from source code. +- CORE-1593, CORE-1761: Add `nats.js` as a project dependency. +- CORE-1466: Build NATS configs on `harperdb run` based on the HarperDB YAML configuration. +- CORE-1467, CORE-1508: Launch and manage NATS servers with PM2. +- CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions. +- CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create the work queue stream, update the `hdb_nodes` table, create the clustering folder structure, and rebuild self-signed certs. +- CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS. +- CORE-1504: Update existing hooks to save transactions to work with NATS. +- CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in the payload. These operations now manage dynamic sourcing of table-level transaction streams between nodes and work queues.
+- CORE-1522: Create a `NATSReplyService` process which handles receiving NATS-based requests from remote instances and sending back appropriate responses. +- CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation. +- CORE-1611: Update pre-existing transaction log operations to be audit log operations. +- CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams. +- CORE-1668: Update NATS serialization/deserialization to use MessagePack. +- CORE-1673: Add `system_info` param to the `hdb_nodes` table and update on `add_node` and `cluster_status`. +- CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & a clustering-only restart call the NATS server with a reload directive to maintain full uptime while servers refresh. +- CORE-1474: HarperDB install adds the clustering folder structure. +- CORE-1530: Post `drop_table`, HarperDB purges the related transaction stream. +- CORE-1567: Set NATS config to always use TLS. +- CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate. +- CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`, which enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB. +- CORE-1535: Create the work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order. +- CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`. +- CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery. +- CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not. +- CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time from which to synchronize transactions. +- CORE-1785: Optimizations and bug fixes with regard to sourcing data from remote instances of HarperDB. +- CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together. +- CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh. +- CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh. +- CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to the new hub server port. +- CORE-1609: Allow `remove_node` to be called when the other node cannot be reached. +- CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid a concurrent NATS source update bug. +- CORE-1848: Update stream configs if the node name has been changed in the YAML configuration. +- CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on both the local and remote nodes, respectively + +**Data Storage** + +We have made improvements to how we store, index, and retrieve data. + +- CORE-1619: Enabled new concurrent flushing technology for improved write performance. +- CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions. +- CORE-1652: Encode the values of secondary indices more efficiently for faster access.
+- CORE-1670: Store the updated timestamp in `lmdb.js`'s version property. +- CORE-1651: Enabled multiple value indexing of array values, which allows searching on specific elements in an array more efficiently. +- CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance. +- Complex objects and object arrays are no longer stored in a separate index, to preserve storage and increase write throughput. +- CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values. +- CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with `get_attributes`) without incurring the cost of decoding the entire object. +- CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data. +- CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates. +- CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements. +- CORE-1753: Removed the old `node-lmdb` dependency. +- CORE-1787: Freeze objects returned from queries. +- CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync. + +**Logging** + +HarperDB has increased logging specificity by breaking out logs based on the component doing the logging. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more. + +- CORE-1497: Remove `pino` and `winston` dependencies. +- CORE-1426: All logging is output via `stdout` and `stderr`; our default logging is then picked up by PM2, which handles writing out to file. +- CORE-1431: Improved `read_log` operation validation. +- CORE-1433, CORE-1463: Added log rotation. +- CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration. +- CORE-1436: Upgrade to 4.0 updates internals for logging changes. +- CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests. +- CORE-1691: Convert non-PM2 managed log file writes to use the Node.js `fs.appendFileSync` function. + +**Configuration** + +HarperDB has updated its configuration from a properties file to YAML. + +- CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML. +- CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file. +- Installer has updated prompts to reflect YAML settings. +- CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`. +- CORE-1461, CORE-1462, CORE-1483: Unit test improvements. +- CORE-1492: Improvements to the `get_configuration` and `set_configuration` operations. +- CORE-1503: Modify HarperDB configuration for more granular certificate definition. +- CORE-1591: Update `routes` IP param to `host` and to `leaf` config in `harperdb.conf`. +- CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on npm install. + +**Broad NodeJS and Platform Support** + +- CORE-1624: HarperDB can now run on multiple versions of NodeJS, from v14 to v19. We primarily test on v18, so that is the preferred version.
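Putting the configuration changes above into practice, here is a minimal sketch of reading the YAML-backed configuration through the `get_configuration` operation mentioned earlier; the endpoint, username, and password are placeholders, not values taken from these notes:

```javascript
// Minimal sketch: read harperdb.conf values through the operations API.
// URL and credentials are placeholders.
const auth = 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64');

const response = await fetch('http://localhost:9925', {
	method: 'POST',
	headers: { 'Content-Type': 'application/json', Authorization: auth },
	body: JSON.stringify({ operation: 'get_configuration' }),
});
console.log(await response.json()); // current configuration as JSON
```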
+
+**Windows 10 and 11**
+
+- CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL. Windows is only intended for evaluation and development purposes, not for production workloads.
+
+**Extra Changes and Bug Fixes**
+
+- CORE-1520: Refactor the installer to remove all waterfall code and update it to use Promises.
+- CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb.
+- CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned.
+- CORE-1626: Update the Dockerfile to accommodate the new `harperdb.conf` file.
+- CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for the CI/CD pipeline and integration tests.
+- CORE-1661: Fixed issue where an old boot properties file caused an error when attempting to install 4.0.0.
+- CORE-1697, CORE-1814, CORE-1855: Upgrade the Fastify dependency to new major version 4.
+- CORE-1629: Jobs are now running as processes managed by the PM2 daemon.
+- CORE-1733: Update LICENSE to reflect our EULA on our site.
+- CORE-1606: Enable Custom Functions by default.
+- CORE-1714: Include pre-built binaries for the most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64).
+- CORE-1628: Fix issue where setting the license through an environment variable was not working.
+- CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements.
+- CORE-1706: Add support for encoding HTTP responses with MessagePack.
+- CORE-1709: Improve the way lmdb.js dependencies are installed.
+- CORE-1758: Remove/update unnecessary HTTP headers.
+- CORE-1756: On `npm install` and `harperdb install`, change the Node version check from an error to a warning if the installed Node.js version does not match our preferred version.
+- CORE-1791: Optimizations to authenticated user caching.
+- CORE-1794: Update README to discuss Windows support & Node.js versions.
+- CORE-1837: Fix issue where the Custom Functions directory was not being created on install.
+- CORE-1742: Add more validation to the audit log: check that the schema/table exists and the log is enabled.
+- CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`.
+- CORE-1864: Fix to semver checks on upgrade.
+- CORE-1850: Fix issue where a `cluster_user` type role could not be altered.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.1.md b/docs/technical-details/release-notes/v4-tucker/4.0.1.md
new file mode 100644
index 00000000..2a85f511
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.1.md
@@ -0,0 +1,13 @@
+---
+title: 4.0.1
+sidebar_position: 59998
+---
+
+### HarperDB 4.0.1, Tucker Release
+
+01/20/2023
+
+**Bug Fixes**
+
+- CORE-1992 Local studio was not loading because the path got mangled in the build.
+- CORE-2001 Fixed `deploy_custom_function_project`, which broke after a Node.js update.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.2.md b/docs/technical-details/release-notes/v4-tucker/4.0.2.md
new file mode 100644
index 00000000..bedbd970
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.2.md
@@ -0,0 +1,13 @@
+---
+title: 4.0.2
+sidebar_position: 59997
+---
+
+### HarperDB 4.0.2, Tucker Release
+
+01/24/2023
+
+**Bug Fixes**
+
+- CORE-2003 Fix bug where, if the machine had one core, the thread config would default to zero.
+- Update to lmdb 2.7.3 and msgpackr 1.7.0
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.3.md b/docs/technical-details/release-notes/v4-tucker/4.0.3.md
new file mode 100644
index 00000000..ad1cbf8a
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.3.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.3
+sidebar_position: 59996
+---
+
+### HarperDB 4.0.3, Tucker Release
+
+01/26/2023
+
+**Bug Fixes**
+
+- CORE-2007 Add the update nodes 4.0.0 launch script to the build script to fix the clustering upgrade.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.4.md b/docs/technical-details/release-notes/v4-tucker/4.0.4.md
new file mode 100644
index 00000000..3f052465
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.4.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.4
+sidebar_position: 59995
+---
+
+### HarperDB 4.0.4, Tucker Release
+
+01/27/2023
+
+**Bug Fixes**
+
+- CORE-2009 Fixed bug where `add_node` was not being called when upgrading clustering.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.5.md b/docs/technical-details/release-notes/v4-tucker/4.0.5.md
new file mode 100644
index 00000000..1696d6d4
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.5.md
@@ -0,0 +1,14 @@
+---
+title: 4.0.5
+sidebar_position: 59994
+---
+
+### HarperDB 4.0.5, Tucker Release
+
+02/15/2023
+
+**Bug Fixes**
+
+- CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to the upgrade process to determine whether new certificates should be created or existing certificates should be kept/used.
+- Fix the way NATS connections are honored in a local environment.
+- Do not pass the certificate authority path to NATS if it is not defined in the HarperDB config.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.6.md b/docs/technical-details/release-notes/v4-tucker/4.0.6.md
new file mode 100644
index 00000000..1cdc1bd7
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.6.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.6
+sidebar_position: 59993
+---
+
+### HarperDB 4.0.6, Tucker Release
+
+03/09/2023
+
+**Bug Fixes**
+
+- Fixed a data serialization error that occurs when a large number of different record structures are persisted in a single table.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.0.7.md b/docs/technical-details/release-notes/v4-tucker/4.0.7.md
new file mode 100644
index 00000000..c4d1fbbf
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.0.7.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.7
+sidebar_position: 59992
+---
+
+### HarperDB 4.0.7, Tucker Release
+
+03/10/2023
+
+**Bug Fixes**
+
+- Update lmdb.js dependency
diff --git a/docs/technical-details/release-notes/v4-tucker/4.1.0.md b/docs/technical-details/release-notes/v4-tucker/4.1.0.md
new file mode 100644
index 00000000..a5ac09d5
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.1.0.md
@@ -0,0 +1,63 @@
+---
+title: 4.1.0
+sidebar_position: 59899
+---
+
+# 4.1.0
+
+HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously this was handled by processes. This shift provides important benefits: better control of traffic delegation with support for optimized load tracking and session affinity, better debuggability, and a reduced memory footprint.
+
+This means debugging will be much easier for custom functions. If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, set breakpoints in your custom functions, and debug them.
+
+The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread, which can improve caching locality, performance, and fairness. This can be enabled with the [`http.sessionAffinity` option in your configuration](../../../deployments/configuration#http).
+
+HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory-efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent), and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and can provide a means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade: all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or for-of loops to handle query results).
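+
+A minimal sketch of consuming an iterator-based query result inside a custom function route; the `dev.dog` table, its attributes, and the route path are hypothetical assumptions for illustration:
+
+```javascript
+// routes/examples.js — hypothetical example route
+module.exports = async (server, { hdbCore }) => {
+	server.route({
+		url: '/dog-names/:breed',
+		method: 'GET',
+		handler: async (request) => {
+			request.body = {
+				operation: 'search_by_value',
+				schema: 'dev',
+				table: 'dog',
+				search_attribute: 'breed',
+				search_value: request.params.breed,
+				get_attributes: ['name'],
+			};
+			const results = await hdbCore.requestWithoutAuthentication(request);
+			// results is an iterator, not an array: results[0] is not available.
+			// Iterate with for-of (or array methods) so records stream lazily.
+			const names = [];
+			for (const record of results) names.push(record.name);
+			return names;
+		},
+	});
+};
+```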
+
+4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../../deployments/configuration#storage) and [schemas configuration](../../../../deployments/configuration#schemas) for information on how to configure these locations.
+
+Logging has been revamped and condensed into one `hdb.log` file. See [logging](../../../administration/logging/) for more information.
+
+A new operation called `cluster_network` was added; this operation will ping the cluster and return a list of enmeshed nodes.
+
+Custom Functions will no longer automatically load static file routes; instead, the `@fastify/static` plugin will need to be registered with the Custom Function server. See [Host A Static Web UI](https://docs.harperdb.io/docs/v/4.1/custom-functions/host-static).
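+
+A minimal sketch of registering `@fastify/static` in a custom functions routes file; the directory layout and URL prefix are assumptions, and `@fastify/static` must be added to the project's dependencies:
+
+```javascript
+// routes/static.js — hypothetical example
+const path = require('path');
+
+module.exports = async (server) => {
+	server.register(require('@fastify/static'), {
+		// absolute path to the directory of files to serve
+		root: path.join(__dirname, '..', 'static'),
+		// files are served under /static/*
+		prefix: '/static/',
+	});
+};
+```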
+
+Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object, it should be done in the `key` parameter. See examples [here](../../../developers/operations-api/bulk-operations#import-from-s3).
+
+Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3`:
+
+- A new attribute `region` will need to be supplied
+- The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`.
+
+Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard Unix tooling for interacting with the process and is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`.
+
+Internal Tickets completed:
+
+- CORE-609 - Ensure that attribute names are always added to the global schema as Strings
+- CORE-1549 - Remove fastify-static code from the Custom Functions server which auto-serves content from the "static" folder
+- CORE-1655 - Iterator-based queries
+- CORE-1764 - Fix issue where the `describe_all` operation returns an empty object for non-super-users if schema(s) do not yet have table(s)
+- CORE-1854 - Switch to using worker threads instead of processes for handling concurrency
+- CORE-1877 - Extend the `csv_url_load` operation to allow for additional headers to be passed to the remote server when the CSV is being downloaded
+- CORE-1893 - Add last updated timestamp to describe operations
+- CORE-1896 - Fix issue where `SELECT * FROM system.hdb_info` returns the wrong HDB version number after an instance upgrade
+- CORE-1904 - Fix issue when executing GeoJSON query in SQL
+- CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams
+- CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables.
+- CORE-1655 - Streaming binary format serialization
+- CORE-1943 - Add configuration option to set the mount point for audit tables
+- CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams.
+- CORE-1963 - Update logging for better readability, reduced duplication, and request context information.
+- CORE-1968 - In `server\nats\natsIngestService.js`, remove the `js_msg.working();` line to improve performance.
+- CORE-1976 - Fix error when calling the `describe_table` operation with no schema or table defined in the payload.
+- CORE-1983 - Fix issue where the `create_attribute` operation does not validate the request for required attributes
+- CORE-2015 - Remove PM2 logs that get logged in the console when starting HDB
+- CORE-2048 - systemd script for 4.1
+- CORE-2052 - Include thread information in `system_information` for visibility of threads
+- CORE-2061 - Add a better error message when clustering is enabled without a cluster user set
+- CORE-2068 - Create new log rotation logic since pm2 log-rotate is no longer used
+- CORE-2072 - Update to Node 18.15.0
+- CORE-2090 - Upgrade testing from v4.0.x and v3.x to v4.1.
+- CORE-2091 - Run the performance tests
+- CORE-2092 - Allow for automatic patch version updates of certain packages
+- CORE-2109 - Add verify option to clustering TLS configuration
+- CORE-2111 - Update AWS SDK to v3
diff --git a/docs/technical-details/release-notes/v4-tucker/4.1.1.md b/docs/technical-details/release-notes/v4-tucker/4.1.1.md
new file mode 100644
index 00000000..54163b63
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.1.1.md
@@ -0,0 +1,15 @@
+---
+title: 4.1.1
+sidebar_position: 59898
+---
+
+# 4.1.1
+
+06/16/2023
+
+- HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. On more memory-constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration).
+- An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration), which can provide improved replication performance in a fully connected network.
+- Improvements to our OpenShift container.
+- Dependency security updates.
+- **Bug Fixes**
+- Fixed a bug in reporting database metrics in the `system_information` operation.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.1.2.md b/docs/technical-details/release-notes/v4-tucker/4.1.2.md
new file mode 100644
index 00000000..fc5e16f4
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.1.2.md
@@ -0,0 +1,13 @@
+---
+title: 4.1.2
+sidebar_position: 59897
+---
+
+### HarperDB 4.1.2, Tucker Release
+
+06/16/2023
+
+- HarperDB has updated binary dependencies to support older glibc versions back to 2.17.
+- A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`.
+- Improvements to our OpenShift container.
+- Dependency security updates.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.0.md b/docs/technical-details/release-notes/v4-tucker/4.2.0.md
new file mode 100644
index 00000000..4e25e854
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.0.md
@@ -0,0 +1,99 @@
+---
+title: 4.2.0
+sidebar_position: 59799
+---
+
+# 4.2.0
+
+#### HarperDB 4.2.0
+
+HarperDB 4.2 introduces a new interface for accessing our core database engine with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 has also adopted a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities, including a new REST interface, advanced caching, and real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events.
+
+4.2 also introduces configurable database schemas, using GraphQL schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting started guide](../../../getting-started) to see how easy it is to get started with HarperDB apps.
+
+### Resource API
+
+The [Resource API](../../reference/resources) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB databases/tables and is designed to be easily implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real-time. The [Resource API documentation details this interface](../../reference/resources).
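+
+A hypothetical sketch of extending a table class in an application's `resources.js`; the `Dog` table, its `age` attribute, and the derived `humanAge` value are illustrative assumptions, not part of this release note:
+
+```javascript
+// resources.js — hypothetical example; `tables` is provided as a global
+// in the HarperDB component environment.
+const { Dog } = tables;
+
+// Export a resource that derives an extra property on each retrieved record.
+export class DogWithHumanAge extends Dog {
+	get(query) {
+		// `this` is the record being retrieved; add a computed value to it.
+		this.humanAge = 16 + this.age * 4;
+		return super.get(query);
+	}
+}
+```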
+
+### Component Architecture
+
+HarperDB's custom functions have evolved towards a full component architecture; our internal functionality is defined as components, and these can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a [well-defined interface for creating your own components](broken-reference). Components can easily be deployed/installed into HarperDB using [NPM and GitHub references](broken-reference) as well.
+
+### Configurable Database Schemas
+
+HarperDB applications or components support [schema definitions using GraphQL schema syntax](../../../developers/applications/defining-schemas). This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with application code.
+
+### REST Interface
+
+HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs, using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs, and more. With standards-based header support built in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers.
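+
+A hypothetical client-side sketch; the `Dog` table, record id, and local port are assumptions for illustration:
+
+```javascript
+// Any HTTP client works; the standard fetch API is shown here.
+const base = 'http://localhost:9926';
+
+async function demo() {
+	// GET retrieves a record by primary key
+	const dog = await (await fetch(`${base}/Dog/1`)).json();
+	console.log(dog);
+
+	// PUT replaces the record at that key
+	await fetch(`${base}/Dog/1`, {
+		method: 'PUT',
+		headers: { 'Content-Type': 'application/json' },
+		body: JSON.stringify({ name: 'Harper', breed: 'Labrador' }),
+	});
+}
+```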
+
+### Real-Time
+
+HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real-time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database-style data updates and querying along with message delivery. [Real-time messaging](../../../developers/real-time) of data is available through several protocols:
+
+#### MQTT
+
+4.2 now includes MQTT support, a publish/subscribe messaging protocol designed for efficiency (efficient enough for even small Internet of Things devices). This allows clients to connect to HarperDB and publish messages through our data center and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions.
+
+#### WebSockets
+
+HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling.
+
+#### Server-Sent Events
+
+HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows web sites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP.
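+
+A hypothetical browser-side sketch using the standard `EventSource` API; the resource path and port are assumptions for illustration:
+
+```javascript
+// Subscribe to real-time changes for a record over standard HTTP.
+// EventSource is built into browsers; no client library is required.
+const events = new EventSource('http://localhost:9926/Dog/1');
+
+events.onmessage = (event) => {
+	// Each message carries the change notification as JSON.
+	console.log('change:', JSON.parse(event.data));
+};
+```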
+
+### Database Structure
+
+HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are also maintained in the same database with atomic consistency.
+
+Databases are now entirely encapsulated in a file, which means they can be moved/copied to another HarperDB instance without requiring any separate metadata updates in the system tables.
+
+### Clone Node
+
+HarperDB includes new functionality for adding new HarperDB nodes to a cluster. New instances can be configured to clone from a leader node, performing and copying a database snapshot from the leader node, and self-configuring from the leader node as well, to facilitate accelerated deployment of new nodes for fast horizontal scaling to meet demand. [See the documentation on Clone Node for more information.](../../../administration/cloning)
+
+### Operations API terminology updates
+
+Any operation that used the `schema` property was updated to make this property optional and alternately support `database` as the property for specifying the database (formerly 'schema'). If both `schema` and `database` are absent, the operation defaults to using the `data` database. The term 'primary key' is now used in place of 'hash'. The NoSQL operation `search_by_hash` was updated to `search_by_id`.
+
+Support was added for defining a table with `primary_key` instead of `hash_attribute`.
+
+## Configuration
+
+There have been significant changes to `harperdb-config.yaml`; however, none of these changes should affect pre-4.2 versions. If you upgrade to 4.2, any existing configuration should be backwards compatible and will not need to be updated.
+
+`harperdb-config.yaml` has had some configuration values added, removed, and renamed, and some defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration) for the most current configuration parameters.
+
+- The `http` element has been expanded.
+  - `compressionThreshold` was added.
+  - All `customFunction` configuration now lives here, except for the `tls` section.
+- `threads` has moved out of the `http` element and is now its own top-level element.
+- The `authentication` section was moved out of the `operationsApi` section and is now its own top-level element/section.
+- `analytics.aggregatePeriod` was added.
+- The default logging level was changed to `warn`.
+- The default clustering log level was changed to `info`.
+- `clustering.republishMessages` now defaults to `false`.
+- `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`.
+- Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section.
+- Added a `securePort` parameter to `operationsApi` and `http`, used for setting the HTTPS port.
+- Added a new top-level `tls` section.
+- Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`.
+- Added an element called `componentRoot` which replaces `customFunctions.root`.
+- Updated custom pathing to use `databases` instead of `schemas`.
+- Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events.
+- A new `mqtt` section was added.
+
+### Socket Management
+
+HarperDB now uses socket sharing (`SO_REUSEPORT`) to distribute incoming connections to different threads. This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity-based socket delegation.
+
+HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on port 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single-port configuration.
+
+### Sessions
+
+HarperDB now supports cookie-based sessions for authentication for web clients. This can be used with the standard authentication mechanisms to log in, and then cookies can be used to preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials.
+
+### Dev Mode
+
+HarperDB can now directly run a HarperDB application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting with any changes in your application files. Dev mode is recommended for local application and component development.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.1.md b/docs/technical-details/release-notes/v4-tucker/4.2.1.md
new file mode 100644
index 00000000..c792a637
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.1.md
@@ -0,0 +1,14 @@
+---
+title: 4.2.1
+sidebar_position: 59798
+---
+
+### HarperDB 4.2.1, Tucker Release
+
+11/3/2023
+
+- Downgrade NATS 2.10.3 back to 2.10.1 due to a regression in connection handling.
+- Handle package names with underscores.
+- Improved validation of queries and comparators
+- Avoid double replication on transactions with multiple commits
+- Added file metadata on `get_component_file`
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.2.md b/docs/technical-details/release-notes/v4-tucker/4.2.2.md
new file mode 100644
index 00000000..9cfa957e
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.2.md
@@ -0,0 +1,16 @@
+---
+title: 4.2.2
+sidebar_position: 59797
+---
+
+### HarperDB 4.2.2, Tucker Release
+
+11/8/2023
+
+- Increase timeouts for NATS connections.
+- Fix for database snapshots for backups (and for clone node).
+- Fix application of permissions for default tables exposed through REST.
+- Log replication failures with record information.
+- Fix application of authorization/permissions for MQTT commands.
+- Fix copying of local components in clone node.
+- Fix calculation of overlapping start time in clone node.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.3.md b/docs/technical-details/release-notes/v4-tucker/4.2.3.md
new file mode 100644
index 00000000..edecd686
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.3.md
@@ -0,0 +1,14 @@
+---
+title: 4.2.3
+sidebar_position: 59796
+---
+
+### HarperDB 4.2.3, Tucker Release
+
+11/15/2023
+
+- When setting `securePort`, disable the insecure port setting on the same port
+- Fix `harperdb status` when the pid file is missing
+- Fix/include missing icons/fonts from local studio
+- Fix crash that can occur when concurrently accessing records > 16KB
+- Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.4.md b/docs/technical-details/release-notes/v4-tucker/4.2.4.md
new file mode 100644
index 00000000..14d268b5
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.4.md
@@ -0,0 +1,11 @@
+---
+title: 4.2.4
+sidebar_position: 59795
+---
+
+### HarperDB 4.2.4, Tucker Release
+
+11/16/2023
+
+- Prevent coercion of strings to numbers in SQL queries (in the WHERE clause)
+- Address Fastify deprecation warning about accessing config
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.5.md b/docs/technical-details/release-notes/v4-tucker/4.2.5.md
new file mode 100644
index 00000000..1b6bf143
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.5.md
@@ -0,0 +1,13 @@
+---
+title: 4.2.5
+sidebar_position: 59794
+---
+
+### HarperDB 4.2.5, Tucker Release
+
+11/22/2023
+
+- Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery)
+- Update the `geoNear` function to tolerate null values
+- lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved
+- Add header to indicate the start of a new authenticated session (for the studio to identify authenticated sessions)
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.6.md b/docs/technical-details/release-notes/v4-tucker/4.2.6.md
new file mode 100644
index 00000000..50abde53
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.6.md
@@ -0,0 +1,11 @@
+---
+title: 4.2.6
+sidebar_position: 59793
+---
+
+### HarperDB 4.2.6, Tucker Release
+
+11/29/2023
+
+- Update various geo SQL functions to tolerate invalid values
+- Properly report component installation/load errors in `get_components` (for the studio to load components after an installation failure)
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.7.md b/docs/technical-details/release-notes/v4-tucker/4.2.7.md
new file mode 100644
index 00000000..5d75e134
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.7.md
@@ -0,0 +1,12 @@
+---
+title: 4.2.7
+sidebar_position: 59792
+---
+
+### HarperDB 4.2.7
+
+12/6/2023
+
+- Add support for cloning over the top of an existing HarperDB instance
+- Add health checks for the NATS consumer with the ability to restart consumer loops for better resiliency
+- Revert the Fastify autoload module due to a regression that had caused EcmaScript modules for Fastify route modules to fail to load on Windows
diff --git a/docs/technical-details/release-notes/v4-tucker/4.2.8.md b/docs/technical-details/release-notes/v4-tucker/4.2.8.md
new file mode 100644
index 00000000..21127797
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.2.8.md
@@ -0,0 +1,15 @@
+---
+title: 4.2.8
+sidebar_position: 59791
+---
+
+### HarperDB 4.2.8
+
+12/19/2023
+
+- Added support for CLI command-line arguments for clone node
+- Added support for cloning a node without enabling clustering
+- Clear the NATS client cache on the closed event
+- Fix check for attribute permissions so that an empty attribute permissions array is treated as a table-level permission definition
+- Improve speed of cross-node health checks
+- Fix for using `database` in describe operations
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.0.md b/docs/technical-details/release-notes/v4-tucker/4.3.0.md
new file mode 100644
index 00000000..a5c02b23
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.0.md
@@ -0,0 +1,125 @@
+---
+title: 4.3.0
+sidebar_position: 59699
+---
+
+# 4.3.0
+
+#### HarperDB 4.3.0, Tucker Release
+
+3/19/2024
+
+#### Relationships and Joins
+
+HarperDB now supports defining relationships between tables. These relationships can be defined as one-to-many, many-to-one, or many-to-many, and use a foreign key to record the relationship between records from different tables. An example of how to use this to define many-to-one and one-to-many relationships between a product and a brand table:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String @indexed
+	# foreign key used to reference a brand
+	brandId: ID @indexed
+	# many-to-one relationship to brand
+	brand: Brand @relation(from: "brandId")
+}
+type Brand @table {
+	id: ID @primaryKey
+	name: String @indexed
+	# one-to-many relationship of brand to products of that brand
+	products: [Product] @relation(to: "brandId")
+}
+```
+
+This relationship model can be used in queries and selects, which will automatically "join" the data from the tables. For example, you could search for products by brand name:
+
+```http
+/Product?brand.name=Microsoft
+```
+
+HarperDB also now supports querying with a sort order. Multiple sort orders can be provided to break ties. Nested selects have also been added, which also utilize joins when related records are referenced. For example:
+
+```http
+/Product?brand.name=Microsoft&sort(price)&select(name,brand{name,size})
+```
+
+See the [schema definition documentation](../../../developers/applications/defining-schemas) for more information on defining relationships, and the [REST documentation for more information on queries](../../../developers/rest).
+
+#### OpenAPI Specification
+
+A new default endpoint `GET /openapi` was added for describing endpoints configured through a GraphQL schema.
+
+#### Query Optimizations
+
+HarperDB has also made numerous improvements to query planning and execution for high-performance query results across a broader range of queries.
+
+#### Indexing Nulls
+
+New tables and indexes now support indexing null values, enabling queries by null (as well as queries for non-null values). For example, you can query by nulls with the REST interface:
+
+```http
+GET /Table/?attribute=null
+```
+
+Note that existing indexes will remain without null value indexing, and can only support indexing/querying by nulls if they are rebuilt (removed and re-added).
+
+#### CLI Expansion
+
+The HarperDB CLI now supports an expansive set of commands that execute operations from the operations API. For example, you can list users from the command line:
+
+```bash
+harperdb list_users
+```
+
+#### BigInt Support
+
+HarperDB now supports `BigInt` attributes/values with integers (with full precision) up to 1000 bits (or 10^301). These can be used as primary keys or standard attributes, and can be used in queries or other operations.
Within JSON documents, you can simply use standard JSON integer numbers with up to 300 digits, and large BigInt integers will be returned as standard JSON numbers.
+
+#### Local Studio Upgrade
+
+HarperDB has upgraded the local studio to match the same version that is offered on https://studio.harperdb.io. The local studio now has the full, robust feature set of the online version.
+
+### MQTT
+
+#### mTLS Support
+
+HarperDB now supports mTLS-based authentication for HTTP, WebSockets, and MQTT. See the [configuration documentation for more information](../../../deployments/configuration).
+
+#### Single-Level Wildcards
+
+HarperDB's MQTT service now supports single-level wildcards (`+`), which facilitates a greater range of subscriptions.
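+
+A hypothetical client sketch using the `mqtt` npm package; the topic names, credentials, and port are assumptions for illustration:
+
+```javascript
+const mqtt = require('mqtt');
+
+const client = mqtt.connect('mqtt://localhost:1883', {
+	username: 'user',
+	password: 'password',
+});
+
+client.on('connect', () => {
+	// `+` matches exactly one topic level: this matches Dog/1 and Dog/2,
+	// but not Dog/1/toys.
+	client.subscribe('Dog/+', { qos: 1 });
+});
+
+client.on('message', (topic, payload) => {
+	console.log(`${topic}: ${payload}`);
+});
+```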
+
+#### Retain Handling
+
+HarperDB's MQTT service now supports the retain handling flags for subscriptions that are made using MQTT v5.
+
+#### CRDT
+
+HarperDB now supports basic conflict-free replicated data type (CRDT) updates that allow properties to be individually updated and merged when separate properties are updated on different threads or nodes. Individual property CRDT updates are automatically performed when you update individual properties through the Resource API, and are used when making `PATCH` requests through the REST API.
+
+The CRDT functionality also supports explicit incrementation to merge multiple parallel incrementation requests with proper summing. See the [Resource API for more information](../../reference/resources).
+
+#### Configuration Improvements
+
+The configuration has improved support for detecting port conflicts and handling paths for Fastify routes, and now includes support for specifying a heap limit and TLS ciphers. See the [configuration documentation for more information](../../../deployments/configuration).
+
+#### Balanced Audit Log Cleanup
+
+Audit log cleanup has been improved to reduce resource consumption during scheduled cleanups.
+
+#### `export_*` support for `search_by_conditions`
+
+The `export_local` and `export_to_s3` operations now support `search_by_conditions` as one of the allowed search operators.
+
+### Storage Performance Improvements
+
+Significant improvements were made to the handling of free space to decrease free-space fragmentation and improve the performance of reusing free space for new data. This includes prioritizing reuse of recently released free space for better memory/caching utilization.
+
+#### Compact Database
+
+In addition to storage improvements, HarperDB now includes functionality for [compacting a database](../../../deployments/harper-cli) (while offline), which can be used to eliminate all free space and reset any fragmentation.
+
+#### Compression
+
+Compression is now enabled by default for all records over 4KB.
+
+To learn more about how to configure compression, visit [configuration](https://docs.harperdb.io/docs/v/4.3/deployments/configuration).
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.1.md b/docs/technical-details/release-notes/v4-tucker/4.3.1.md
new file mode 100644
index 00000000..870968bd
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.1.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.1
+sidebar_position: 59698
+---
+
+### HarperDB 4.3.1
+
+3/25/2024
+
+- Fix Fastify warning about responseTime usage
+- Add access to the MQTT topic in the context
+- Fix for ensuring local NATS streams are created
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.10.md b/docs/technical-details/release-notes/v4-tucker/4.3.10.md
new file mode 100644
index 00000000..7badf0cc
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.10.md
@@ -0,0 +1,13 @@
+---
+title: 4.3.10
+sidebar_position: 59689
+---
+
+### HarperDB 4.3.10
+
+5/5/2024
+
+- Provide a `data` property on the request/context with deserialized data from the request body for any request, including methods that don't typically have a request body
+- Ensure that CRDTs are not double-applied after committing a transaction
+- Delete the MQTT will message after publishing, even if it fails to publish
+- Improve transaction retry logic to use async non-optimistic transactions after multiple retries
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.11.md b/docs/technical-details/release-notes/v4-tucker/4.3.11.md
new file mode 100644
index 00000000..82b47381
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.11.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.11
+sidebar_position: 59688
+---
+
+### HarperDB 4.3.11
+
+5/15/2024
+
+- Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS
+- Fix warning in Node v22
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.12.md b/docs/technical-details/release-notes/v4-tucker/4.3.12.md
new file mode 100644
index 00000000..3f016e25
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.12.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.12
+sidebar_position: 59687
+---
+
+### HarperDB 4.3.12
+
+5/16/2024
+
+- Fix for handling ciphers in multiple certificates
+- Allow each certificate config to have multiple hostnames
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.13.md b/docs/technical-details/release-notes/v4-tucker/4.3.13.md
new file mode 100644
index 00000000..e7833e0a
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.13.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.13
+sidebar_position: 59686
+---
+
+### HarperDB 4.3.13
+
+5/22/2024
+
+- Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available
+- Fix for a memory leak when a node is down and consumers are trying to reconnect
+- Faster cross-thread notification mechanism for transaction events
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.14.md b/docs/technical-details/release-notes/v4-tucker/4.3.14.md
new file mode 100644
index 00000000..0bf4e9c8
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.14.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.14
+sidebar_position: 59685
+---
+
+### HarperDB 4.3.14
+
+5/24/2024
+
+- Fix application of ciphers to multi-certificate TLS configuration
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.15.md b/docs/technical-details/release-notes/v4-tucker/4.3.15.md
new file mode 100644
index 00000000..48321fb6
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.15.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.15
+sidebar_position: 59684
+---
+
+### HarperDB 4.3.15
+
+5/29/2024
+
+- Add support for wildcards in hostnames for SNI
+- Properly apply cipher settings on multiple TLS configurations
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.16.md b/docs/technical-details/release-notes/v4-tucker/4.3.16.md
new file mode 100644
index 00000000..195e27b7
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.16.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.16
+sidebar_position: 59683
+---
+
+### HarperDB 4.3.16
+
+6/3/2024
+
+- Properly shim legacy TLS configuration with new multi-certificate support
+- Show the changed filenames when an application is reloaded
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.17.md b/docs/technical-details/release-notes/v4-tucker/4.3.17.md
new file mode 100644
index 00000000..27a0f4cb
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.17.md
@@ -0,0 +1,15 @@
+---
+title: 4.3.17
+sidebar_position: 59682
+---
+
+### HarperDB 4.3.17
+
+6/13/2024
+
+- Add MQTT analytics of incoming messages, separated by QoS level
+- Ensure that any installed `harperdb` package in components is relinked to the running harperdb.
+- Upgrade storage to more efficiently avoid storage increases
+- Fix to improve database metrics in `system_information`
+- Fix for pathing on Windows with extension modules
+- Add ability to define a range of listening threads
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.18.md b/docs/technical-details/release-notes/v4-tucker/4.3.18.md
new file mode 100644
index 00000000..052b3821
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.18.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.18
+sidebar_position: 59681
+---
+
+### HarperDB 4.3.18
+
+6/18/2024
+
+- Immediately terminate an MQTT connection when there is a keep-alive timeout.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.19.md b/docs/technical-details/release-notes/v4-tucker/4.3.19.md
new file mode 100644
index 00000000..2676c9f6
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.19.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.19
+sidebar_position: 59680
+---
+
+### HarperDB 4.3.19
+
+7/2/2024
+
+- Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized.
+- Ensure that component deploys empty the target directory for a clean installation and expansion of a `package` sub-directory.
+- Ensure that we do not double-load components that are referenced by symlink from node_modules and in the components directory.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.2.md b/docs/technical-details/release-notes/v4-tucker/4.3.2.md
new file mode 100644
index 00000000..ca273c5e
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.2.md
@@ -0,0 +1,16 @@
+---
+title: 4.3.2
+sidebar_position: 59697
+---
+
+### HarperDB 4.3.2
+
+3/29/2024
+
+- Clone node updates to individually clone missing parts
+- Fixes for publishing the OpenShift container
+- Increase purge stream timeout
+- Fixed declaration of the analytics schema so queries work before a restart
+- Fix for iterating queries when deleted records exist
+- LMDB stability upgrade
+- Fix for cleanup of last will in MQTT
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.20.md b/docs/technical-details/release-notes/v4-tucker/4.3.20.md
new file mode 100644
index 00000000..d090990b
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.20.md
@@ -0,0 +1,18 @@
+---
+title: 4.3.20
+sidebar_position: 59679
+---
+
+### HarperDB 4.3.20
+
+7/11/2024
+
+- The `restart_service` operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads)
+- Disable Nagle's algorithm for TCP connections to improve performance
+- Append to the Server-Timing header if a Fastify route has already added one
+- Avoid symlinking the harperdb directory to itself
+- Fix for deleting an empty database
+- Upgrade ws and pm2 packages for security vulnerabilities
+- Improved TypeScript definitions for Resource and Context.
+- The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from source
+- Better error reporting of MQTT parsing errors and termination of connections for compliance
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.21.md b/docs/technical-details/release-notes/v4-tucker/4.3.21.md
new file mode 100644
index 00000000..7afefd12
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.21.md
@@ -0,0 +1,14 @@
+---
+title: 4.3.21
+sidebar_position: 59678
+---
+
+### HarperDB 4.3.21
+
+8/21/2024
+
+- Fixed an issue with iterating/serializing query results with a `limit`.
+- Fixed an issue that was preventing the caching of structured records in memory.
+- Fixed and added several TypeScript exported types, including `tables`, `databases`, `Query`, and `Context`.
+- Fixed logging warnings about license limits after a license is updated.
+- Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.22.md b/docs/technical-details/release-notes/v4-tucker/4.3.22.md
new file mode 100644
index 00000000..a4bc2003
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.22.md
@@ -0,0 +1,15 @@
+---
+title: 4.3.22
+sidebar_position: 59677
+---
+
+### HarperDB 4.3.22
+
+9/6/2024
+
+- Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions
+- Allow `.extension` in URL paths to indicate both preferred encoding and decoding
+- Added support for multi-part ids in query parameters
+- Limit describe calls by time before using statistical sampling
+- Proper cleanup of a transaction when it is aborted due to running out of available read transactions
+- Updates to release/builds
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.23.md b/docs/technical-details/release-notes/v4-tucker/4.3.23.md
new file mode 100644
index 00000000..7496c1d1
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.23.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.23
+sidebar_position: 59676
+---
+
+### HarperDB 4.3.23
+
+9/12/2024
+
+- Avoid long-running read transactions on subscription catch-ups
+- Reverted change to setting the default certificate for IP address only
+- Better handling of last-will messages on startup
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.24.md b/docs/technical-details/release-notes/v4-tucker/4.3.24.md
new file mode 100644
index 00000000..435c15ec
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.24.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.24
+sidebar_position: 59675
+---
+
+### HarperDB 4.3.24
+
+9/12/2024
+
+- Fix for querying for large strings (over 255 characters)
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.25.md b/docs/technical-details/release-notes/v4-tucker/4.3.25.md
new file mode 100644
index 00000000..601d9ec0
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.25.md
@@ -0,0 +1,13 @@
+---
+title: 4.3.25
+sidebar_position: 59674
+---
+
+### HarperDB 4.3.25
+
+9/24/2024
+
+- Add analytics for replication latency
+- Fix iteration issue over asynchronous joined queries
+- Local studio fix for loading applications in an insecure context (HTTP)
+- Local studio fix for loading the configuration tab
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.26.md b/docs/technical-details/release-notes/v4-tucker/4.3.26.md
new file mode 100644
index 00000000..c0dacf54
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.26.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.26
+sidebar_position: 59673
+---
+
+### HarperDB 4.3.26
+
+9/27/2024
+
+- Fixed a security issue that allowed users to bypass access controls with the operations API
+- Previously, expiration handling was limited to tables with a source, but now it can be applied to any table
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.27.md b/docs/technical-details/release-notes/v4-tucker/4.3.27.md
new file mode 100644
index 00000000..0bbd448a
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.27.md
@@ -0,0 +1,14 @@
+---
+title: 4.3.27
+sidebar_position: 59672
+---
+
+### HarperDB 4.3.27
+
+10/2/2024
+
+- Fixed handling of HTTP upgrade with a Connection header that does not use Upgrade as the sole value (for Firefox)
+- Added metrics for requests by status code
+- Properly remove attributes from the stored metadata when removed from the GraphQL schema
+- Fixed a regression in clustering retrieval of schema description
+- Fix attribute validation/handling to ensure that sequential ids can be assigned with insert/upsert operations
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.28.md b/docs/technical-details/release-notes/v4-tucker/4.3.28.md
new file mode 100644
index 00000000..361d416d
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.28.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.28
+sidebar_position: 59671
+---
+
+### HarperDB 4.3.28
+
+10/3/2024
+
+- Tolerate a user with no role when building the NATS config
+- Change metrics for requests by status code to be prefixed with "response\_"
+- Log error `cause`, and other properties, when available.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.29.md b/docs/technical-details/release-notes/v4-tucker/4.3.29.md
new file mode 100644
index 00000000..5537df8b
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.29.md
@@ -0,0 +1,17 @@
+---
+title: 4.3.29
+sidebar_position: 59670
+---
+
+### HarperDB 4.3.29
+
+10/7/2024
+
+- Avoid unnecessary cookie session creation without explicit login
+- Added support for caching directives in the operations API
+- Fixed issue with creating metadata for a table with no primary key
+- Local studio upgrade:
+  - Added support for "cache only" mode to view table data without origin resolution
+  - Added partial support for cookie-based authentication
+  - Added support for browsing tables with no primary key
+  - Improved performance for sorting tables
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.3.md b/docs/technical-details/release-notes/v4-tucker/4.3.3.md
new file mode 100644
index 00000000..38175dda
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.3.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.3
+sidebar_position: 59696
+---
+
+### HarperDB 4.3.3
+
+4/01/2024
+
+- Improve MQTT logging by properly logging auth failures and disconnections
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.30.md b/docs/technical-details/release-notes/v4-tucker/4.3.30.md
new file mode 100644
index 00000000..e005db97
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.30.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.30
+sidebar_position: 59669
+---
+
+### HarperDB 4.3.30
+
+10/9/2024
+
+- Properly assign transaction timestamps to writes from cache resolutions (ensuring that latencies can be calculated on replicating nodes)
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.31.md b/docs/technical-details/release-notes/v4-tucker/4.3.31.md
new file mode 100644
index 00000000..80cab2b9
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.31.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.31
+sidebar_position: 59668
+---
+
+### HarperDB 4.3.31
+
+10/10/2024
+
+- Reset the restart limit for manual restarts to ensure that the NATS process will continue to restart after more than 10 manual restarts
+- Only apply caching directives (from headers) to tables/resources that are configured to be caching, sourced from another resource
+- Catch/tolerate errors on serializing objects for logging
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.32.md b/docs/technical-details/release-notes/v4-tucker/4.3.32.md
new file mode 100644
index 00000000..0b5893b4
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.32.md
@@ -0,0 +1,12 @@
+---
+title: 4.3.32
+sidebar_position: 59667
+---
+
+### HarperDB 4.3.32
+
+10/16/2024
+
+- Fix a memory leak when cluster_network closes a hub connection
+- Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic
+- Record analytics and the Server-Timing header even when cache resolution fails
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.33.md b/docs/technical-details/release-notes/v4-tucker/4.3.33.md
new file mode 100644
index 00000000..7707a562
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.33.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.33
+sidebar_position: 59666
+---
+
+### HarperDB 4.3.33
+
+10/24/2024
+
+- Change the default maximum length for a Fastify route parameter from 100 to 1000 characters.
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.34.md b/docs/technical-details/release-notes/v4-tucker/4.3.34.md
new file mode 100644
index 00000000..2bd65833
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.34.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.34
+sidebar_position: 59665
+---
+
+### HarperDB 4.3.34
+
+10/24/2024
+
+- lmdb-js upgrade
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.35.md b/docs/technical-details/release-notes/v4-tucker/4.3.35.md
new file mode 100644
index 00000000..f8dd7b73
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.35.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.35
+sidebar_position: 59664
+---
+
+### HarperDB 4.3.35
+
+11/12/2024
+
+- Upgrades for supporting Node.js v23
+- Fix for handling a change in the schema for nested data structures
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.36.md b/docs/technical-details/release-notes/v4-tucker/4.3.36.md
new file mode 100644
index 00000000..2eb8e636
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.36.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.36
+sidebar_position: 59663
+---
+
+### HarperDB 4.3.36
+
+11/14/2024
+
+- lmdb-js upgrade for better free-space management
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.37.md b/docs/technical-details/release-notes/v4-tucker/4.3.37.md
new file mode 100644
index 00000000..f36e1c32
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.37.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.37
+sidebar_position: 59662
+---
+
+### HarperDB 4.3.37
+
+12/6/2024
+
+- lmdb-js upgrade for preventing crashes with shared user buffers
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.38.md b/docs/technical-details/release-notes/v4-tucker/4.3.38.md
new file mode 100644
index 00000000..d1fce0f8
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.38.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.38
+sidebar_position: 59661
+---
+
+### HarperDB 4.3.38
+
+1/10/2025
+
+- Fixes for audit log cleanup
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.4.md b/docs/technical-details/release-notes/v4-tucker/4.3.4.md
new file mode 100644
index 00000000..0c96732f
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.4.md
@@ -0,0 +1,11 @@
+---
+title: 4.3.4
+sidebar_position: 59695
+---
+
+### HarperDB 4.3.4
+
+4/9/2024
+
+- Fixed a buffer overrun issue with decompressing compressed data
+- Better keep-alive of transactions with long-running queries
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.5.md b/docs/technical-details/release-notes/v4-tucker/4.3.5.md
new file mode 100644
index 00000000..60888785
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.5.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.5
+sidebar_position: 59694
+---
+
+### HarperDB 4.3.5
+
+4/10/2024
+
+- Fixed a buffer overrun issue with decompressing compressed data
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.6.md b/docs/technical-details/release-notes/v4-tucker/4.3.6.md
new file mode 100644
index 00000000..54a4739a
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.6.md
@@ -0,0 +1,14 @@
+---
+title: 4.3.6
+sidebar_position: 59693
+---
+
+### HarperDB 4.3.6
+
+4/12/2024
+
+- Fixed parsing of dates from epoch millisecond times in queries
+- Fixed CRDT incrementation of different data types
+- Adjustments to text/plain content-type q-value handling
+- Fixed parsing of passwords with a colon
+- Added MQTT events for connections, authorization, and disconnections
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.7.md b/docs/technical-details/release-notes/v4-tucker/4.3.7.md
new file mode 100644
index 00000000..df9fb331
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.7.md
@@ -0,0 +1,14 @@
+---
+title: 4.3.7
+sidebar_position: 59692
+---
+
+### HarperDB 4.3.7
+
+4/16/2024
+
+- Fixed transaction handling to stay open during long compaction operations
+- Fixed handling of sorting on non-indexed attributes
+- Storage stability improvements
+- Fixed authentication/authorization of WebSockets connections and use of cookies
+- Fixes for clone node operations
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.8.md b/docs/technical-details/release-notes/v4-tucker/4.3.8.md
new file mode 100644
index 00000000..0e4c5b6c
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.8.md
@@ -0,0 +1,14 @@
+---
+title: 4.3.8
+sidebar_position: 59691
+---
+
+### HarperDB 4.3.8
+
+4/26/2024
+
+- Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within the keep-alive window)
+- Improved handling of write queue timeouts, with configurability
+- Fixed a memory leak that can occur with NATS reconnections after heartbeat misses
+- Fixed a bug in clone node with a null port
+- Add error events to the MQTT events system
diff --git a/docs/technical-details/release-notes/v4-tucker/4.3.9.md b/docs/technical-details/release-notes/v4-tucker/4.3.9.md
new file mode 100644
index 00000000..17c95934
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.3.9.md
@@ -0,0 +1,10 @@
+---
+title: 4.3.9
+sidebar_position: 59690
+---
+
+### HarperDB 4.3.9
+
+4/30/2024
+
+- lmdb-js upgrade
diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.0.md b/docs/technical-details/release-notes/v4-tucker/4.4.0.md
new file mode 100644
index 00000000..e5f98221
--- /dev/null
+++ b/docs/technical-details/release-notes/v4-tucker/4.4.0.md
@@ -0,0 +1,60 @@
+---
+title: 4.4.0
+sidebar_position: 59599
+---
+
+# 4.4.0
+
+#### HarperDB 4.4.0
+
+10/14/2024
+
+### Native Replication
+
+HarperDB has a completely [new native replication system](../../../developers/replication/) which is faster, more efficient, more secure, and more reliable than the previous replication system. The new system (codenamed "Plexus") uses direct WebSocket connections between servers with highly optimized encoding and is driven by directly tracking the audit/transaction log for efficient and flexible data transfer. This replication has improved resilience, with the ability to reach consensus consistency through cross-node catch-up when one node goes down. Network connections can be performed over the existing operations API port or a separate port, for improved configurability.
+ +The native replication system is much easier to configure, with multiple options for authentication and security, including PKI/mTLS security that is highly robust and easy to use in conjunction with existing PKI certificates. Replication can be configured through explicit subscriptions or for automated replication of all data in a database. With automated replication, gossiping is used to automatically discover and connect to other nodes in the cluster. + +#### Sharding + +The new replication system also includes provisional support for [sharding](../../../developers/replication/sharding). This sharding mechanism paves the way for greater scalability and performance, by allowing data to be distributed across multiple nodes. + +#### Replicated Operations + +Certain operations can now be replicated across the cluster, including the deployment and management of components. This allows for a more seamless experience when managing a cluster of HarperDB instances. Restarts can also be "replicated", and if used, will perform a rolling restart of all the nodes in a cluster. + +### Computed Properties + +Computed properties allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value. For example, you could have a computed property for a full name based on first and last, or age/duration based on a date. Computed properties are also foundational for custom indexes. See the [schema documentation](../../../developers/applications/defining-schemas), [Resource API](../../reference/resources), and our blog post on [computed properties](https://www.harperdb.io/development/tutorials/how-to-create-custom-indexes-with-computed-properties) for more information; a schema sketch also appears below, after the auto-incrementing keys section. + +### Custom Indexing + +Custom indexes can now be defined using computed properties to allow for unlimited possibilities of indexing, including composite, full-text, and vector indexing. Again, see the [schema documentation](../../../developers/applications/defining-schemas) for more information. + +### Native Graph Support + +HarperDB now includes provisional support for native [GraphQL querying functionality](../../reference/graphql). This allows for querying of graph data using GraphQL syntax. This is provisional and some APIs may be updated in the future. + +### Dynamic Certificate Management + +Certificates are now stored in system tables and can be dynamically managed. Certificates can be added, replaced, and deleted without restarting HarperDB. This includes both standard certificates and certificate authorities, as well as private keys (private keys are not stored in a table; they are securely stored in a file). + +#### Status Report on Startup + +On startup, HarperDB will now print out an informative status of all running services and the ports they are listening on. + +#### Support for Response object + +Resource methods can now return a `Response` object (or an object with `headers` and `status`) to allow for more control over the response. + +### Auto-incrementing Primary Keys + +Primary keys can now be auto-incrementing, allowing for automatic generation of numeric primary keys on insert/creation. Primary keys defined with `ID` or `String` will continue to use GUIDs for auto-assigned primary keys, which occurs on insert or creation if the primary key is not provided. However, for keys that are defined as `Any`, `Int`, or `Long`, the primary key will be assigned using auto-incrementation. This is significantly more efficient than GUIDs since the key only requires 8 bytes of storage instead of 31 bytes, and doesn't require random number generation.
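+
+To make computed properties and auto-incrementing keys concrete, here is a minimal schema sketch. The type and field names are hypothetical, and the exact `@computed` directive syntax should be checked against the schema documentation linked above:
+
+```graphql
+type Order @table @export {
+	id: Int @primaryKey # Int/Long/Any primary keys are auto-incremented when omitted on insert
+	price: Float
+	quantity: Int
+	# Calculated from other fields; not stored, and usable as a basis for custom indexes
+	total: Float @computed(from: "price * quantity")
+}
+```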
+ +#### Developer/Production Mode for Configuration + +When using interactive installation (when configuration is not provided through arguments or env vars), HarperDB now provides a choice between developer and production mode, each with a set of configuration defaults suited to that environment. + +**Export by Protocol** + +Exported resources can be configured to be specifically exported by protocol (REST, MQTT, etc.) for more granular control over what is exported where. diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.1.md b/docs/technical-details/release-notes/v4-tucker/4.4.1.md new file mode 100644 index 00000000..5c1e2037 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.1.md @@ -0,0 +1,13 @@ +--- +title: 4.4.1 +sidebar_position: 59598 +--- + +### HarperDB 4.4.1 + +10/17/2024 + +- Fix issue where non-RSA keys were not being parsed correctly on startup. +- Fix a memory leak when cluster_network closes a hub connection +- Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic +- Record analytics and server-timing header even when cache resolution fails diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.10.md b/docs/technical-details/release-notes/v4-tucker/4.4.10.md new file mode 100644 index 00000000..6d8aad2c --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.10.md @@ -0,0 +1,10 @@ +--- +title: 4.4.10 +sidebar_position: 59589 +--- + +### HarperDB 4.4.10 + +12/17/2024 + +- Fix for deploying packages and detecting the node_modules directory diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.11.md b/docs/technical-details/release-notes/v4-tucker/4.4.11.md new file mode 100644 index 00000000..5e5b5fc0 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.11.md @@ -0,0 +1,11 @@ +--- +title: 4.4.11 +sidebar_position: 59588 +--- + +### HarperDB 4.4.11 + +12/18/2024 + +- Fix for initial certificate creation on upgrade +- Docker build fix diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.12.md b/docs/technical-details/release-notes/v4-tucker/4.4.12.md new file mode 100644 index 00000000..8efe840e --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.12.md @@ -0,0 +1,11 @@ +--- +title: 4.4.12 +sidebar_position: 59587 +--- + +### HarperDB 4.4.12 + +12/19/2024 + +- Move components installed by reference into hdb/components for consistency and compatibility with Next.js +- Use npm install --force to ensure modules are installed diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.13.md b/docs/technical-details/release-notes/v4-tucker/4.4.13.md new file mode 100644 index 00000000..cab28cc0 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.13.md @@ -0,0 +1,16 @@ +--- +title: 4.4.13 +sidebar_position: 59586 +--- + +### HarperDB 4.4.13 + +1/2/2025 + +- Fix for not using requestCert if the port doesn't need replication +- Fix for applying timeouts to the HTTP server for older Node.js versions +- Updates for different replication configuration settings, including sharding and replication using stored credentials
+- Mitigation for crashes due to GC'ed shared array buffers +- Fix for error handling with CLI failures +- Updated dependencies +- Fix to allow securePort to be set for authentication diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.14.md b/docs/technical-details/release-notes/v4-tucker/4.4.14.md new file mode 100644 index 00000000..b44a173d --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.14.md @@ -0,0 +1,13 @@ +--- +title: 4.4.14 +sidebar_position: 59585 +--- + +### HarperDB 4.4.14 + +1/3/2025 + +- Fix for starting the HTTP server if headersTimeout is omitted in the configuration +- Fix for avoiding ping timeouts for large/long-duration WS messages between nodes +- Don't report errors for a component that only uses a directory +- Add flag for disabling WebSocket on REST component diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.15.md b/docs/technical-details/release-notes/v4-tucker/4.4.15.md new file mode 100644 index 00000000..b6a8ee2b --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.15.md @@ -0,0 +1,12 @@ +--- +title: 4.4.15 +sidebar_position: 59584 +--- + +### HarperDB 4.4.15 + +1/8/2025 + +- Fix for managing the state of replication sequences per node +- Fix for better concurrency with ongoing replication +- Fix for accessing audit log entries diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.16.md b/docs/technical-details/release-notes/v4-tucker/4.4.16.md new file mode 100644 index 00000000..d85de974 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.16.md @@ -0,0 +1,16 @@ +--- +title: 4.4.16 +sidebar_position: 59583 +--- + +### HarperDB 4.4.16 + +1/22/2025 + +- Fix for cleaning up old audit entries and associated deletion entries +- Allow CLI operations to be run when cloning is enabled +- Report table size in describe operations +- Fix for cleaning up symlinks when dropping components +- Fix for enumerating components when symlinks are used +- Add an option for using a specific installation command with deploys +- Add an API for registering an HTTP upgrade listener with `server.upgrade` diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.17.md b/docs/technical-details/release-notes/v4-tucker/4.4.17.md new file mode 100644 index 00000000..239f7729 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.17.md @@ -0,0 +1,13 @@ +--- +title: 4.4.17 +sidebar_position: 59582 +--- + +### HarperDB 4.4.17 + +1/29/2025 + +- Provide statistics on the size of the audit log store +- Fix handling of symlinks to the HarperDB package to avoid NPM errors in restricted containers +- Add option for rolling/consecutive restarts for deployments +- Fix for enabling root CAs for replication authorization diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.18.md b/docs/technical-details/release-notes/v4-tucker/4.4.18.md new file mode 100644 index 00000000..e7354587 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.18.md @@ -0,0 +1,12 @@ +--- +title: 4.4.18 +sidebar_position: 59581 +--- + +### HarperDB 4.4.18 + +1/29/2025 + +- Add option for disabling full table copy in replication +- Add option for startTime in route configuration +- Add/fix option to deploy with package from CLI diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.19.md b/docs/technical-details/release-notes/v4-tucker/4.4.19.md new file mode 100644 index 00000000..5a1cc14e --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.19.md @@ -0,0 +1,13 @@ +--- +title: 4.4.19 +sidebar_position: 59580 +--- +
+### HarperDB 4.4.19 + +2/4/2025 + +- LMDB upgrade for free-list verification on commit +- Add check to avoid compacting database multiple times with compactOnStart +- Fix handling of denied/absent subscription +- Add support for including symlinked directories in packaging a deployed component diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.2.md b/docs/technical-details/release-notes/v4-tucker/4.4.2.md new file mode 100644 index 00000000..53dfbb7b --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.2.md @@ -0,0 +1,10 @@ +--- +title: 4.4.2 +sidebar_position: 59597 +--- + +### HarperDB 4.4.2 + +10/18/2024 + +- Republish of 4.4.1 with Git merge correction. diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.20.md b/docs/technical-details/release-notes/v4-tucker/4.4.20.md new file mode 100644 index 00000000..656de065 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.20.md @@ -0,0 +1,10 @@ +--- +title: 4.4.20 +sidebar_position: 59579 +--- + +### HarperDB 4.4.20 + +2/11/2025 + +- LMDB upgrade for improved handling of page boundaries with free-space lists diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.21.md b/docs/technical-details/release-notes/v4-tucker/4.4.21.md new file mode 100644 index 00000000..c63d84a2 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.21.md @@ -0,0 +1,12 @@ +--- +title: 4.4.21 +sidebar_position: 59578 +--- + +### HarperDB 4.4.21 + +2/25/2025 + +- Fix for saving audit log entries for large keys (> 1KB) +- Security fix for handling missing passwords +- Skip bin links for NPM installation to avoid access issues diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.22.md b/docs/technical-details/release-notes/v4-tucker/4.4.22.md new file mode 100644 index 00000000..d66163f9 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.22.md @@ -0,0 +1,10 @@ +--- +title: 4.4.22 +sidebar_position: 59577 +--- + +### HarperDB 4.4.22 + +3/5/2025 + +- Add new http configuration option `corsAccessControlAllowHeaders` diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.23.md b/docs/technical-details/release-notes/v4-tucker/4.4.23.md new file mode 100644 index 00000000..9048b3d6 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.23.md @@ -0,0 +1,11 @@ +--- +title: 4.4.23 +sidebar_position: 59576 +--- + +### HarperDB 4.4.23 + +3/7/2025 + +- Fix for subscriptions to children of segmented id +- Fix for better error reporting on NPM failures diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.24.md b/docs/technical-details/release-notes/v4-tucker/4.4.24.md new file mode 100644 index 00000000..324a2423 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.24.md @@ -0,0 +1,11 @@ +--- +title: 4.4.24 +sidebar_position: 59575 +--- + +### HarperDB 4.4.24 + +3/10/2025 + +- Use process.exit(0) to restart when enabled by env var +- Reset the cwd on thread restart diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.3.md b/docs/technical-details/release-notes/v4-tucker/4.4.3.md new file mode 100644 index 00000000..4e844820 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.3.md @@ -0,0 +1,14 @@ +--- +title: 4.4.3 +sidebar_position: 59596 +--- + +### HarperDB 4.4.3 + +10/25/2024 + +- Fix for notification of records through classes that override get for multi-tier caching +- Fix for CLI operations +- Support for longer route parameters in Fastify routes +- Fix for accessing `harperdb` 
package/module from user threads +- Improvements to clone node for cloning without credentials diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.4.md b/docs/technical-details/release-notes/v4-tucker/4.4.4.md new file mode 100644 index 00000000..bbf0df8d --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.4.md @@ -0,0 +1,12 @@ +--- +title: 4.4.4 +sidebar_position: 59595 +--- + +### HarperDB 4.4.4 + +11/4/2024 + +- Re-introduce declarative roles and permissions +- Fix for OpenAPI endpoint +- Fix for exports of `harperdb` package/module diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.5.md b/docs/technical-details/release-notes/v4-tucker/4.4.5.md new file mode 100644 index 00000000..448687c6 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.5.md @@ -0,0 +1,16 @@ +--- +title: 4.4.5 +sidebar_position: 59594 +--- + +### HarperDB 4.4.5 + +11/15/2024 + +- Fix for DoS vulnerability in large headers with cache-control and replication headers +- Fix for handling a change in the schema type for sub-fields in a nested object +- Add support for content type handlers to return iterators +- Fix for session management with custom authentication handler +- Updates for Node.js V23 compatibility +- Fix for sorting on nested properties +- Fix for querying on not_equal to a null with object values diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.6.md b/docs/technical-details/release-notes/v4-tucker/4.4.6.md new file mode 100644 index 00000000..4cc0cc86 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.6.md @@ -0,0 +1,13 @@ +--- +title: 4.4.6 +sidebar_position: 59593 +--- + +### HarperDB 4.4.6 + +11/25/2024 + +- Fix queries with only sorting applied +- Fix for handling invalidation events propagating through sources +- Expanded CLI support for deploying packages +- Support for deploying large packages diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.7.md b/docs/technical-details/release-notes/v4-tucker/4.4.7.md new file mode 100644 index 00000000..a4f6041f --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.7.md @@ -0,0 +1,11 @@ +--- +title: 4.4.7 +sidebar_position: 59592 +--- + +### HarperDB 4.4.7 + +11/27/2024 + +- Allow a package to deploy its own modules +- Fix for preventing double sourcing of resources diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.8.md b/docs/technical-details/release-notes/v4-tucker/4.4.8.md new file mode 100644 index 00000000..493736a8 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.8.md @@ -0,0 +1,10 @@ +--- +title: 4.4.8 +sidebar_position: 59591 +--- + +### HarperDB 4.4.8 + +12/2/2024 + +- Add multiple Node.js versions of published Docker containers diff --git a/docs/technical-details/release-notes/v4-tucker/4.4.9.md b/docs/technical-details/release-notes/v4-tucker/4.4.9.md new file mode 100644 index 00000000..077e80cd --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.4.9.md @@ -0,0 +1,14 @@ +--- +title: 4.4.9 +sidebar_position: 59590 +--- + +### HarperDB 4.4.9 + +12/12/2024 + +- Change enableRootCAs to default to true +- Fixes for install and clone commands +- Add rejectUnauthorized to the CLI options +- Fixes for cloning +- Install modules in their own component when deploying a package by payload diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.0.md b/docs/technical-details/release-notes/v4-tucker/4.5.0.md new file mode 100644 index 00000000..2f8203fa --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.0.md @@ -0,0 +1,99 @@ +--- +title: 4.5.0 +sidebar_position: 59499 +--- + +# 4.5.0 + +#### HarperDB 4.5.0 + +3/13/2025 + +### Blob Storage + +4.5 introduces a new [Blob storage system](../../reference/blob) that is designed to efficiently handle large binary objects, with built-in support for streaming large content/media in and out of storage. This provides significantly better performance and functionality for large unstructured data, such as HTML, images, video, and other large files. Components can leverage this functionality through the JavaScript `Blob` interface, and the new `createBlob` function. Blobs are fully replicated and integrated. Harper can also coerce strings to `Blob`s (when dictated by the field type), making it feasible to use blobs for large string data, including with MQTT messaging.
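+
+As a quick sketch of `createBlob` in component code (the table and field names are hypothetical, and the `data` field is assumed to be declared with a `Blob` type in the schema):
+
+```typescript
+// Hypothetical component code: store a large file as a Blob-typed field.
+import { createBlob, tables } from 'harperdb';
+import { readFile } from 'node:fs/promises';
+
+const { Media } = tables; // assumes a Media table with a `data: Blob` field
+
+export async function storeVideo(id: string, path: string): Promise<void> {
+	const blob = await createBlob(await readFile(path)); // wrap the binary content in a blob
+	await Media.put({ id, data: blob }); // the blob replicates along with the record
+}
+```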
+ +### Password Hashing Upgrade + +4.5 adds two new password hashing algorithms for better security (to replace md5): +`sha256`: This is a solid general-purpose password hashing algorithm, with good security properties and excellent performance. This is the default algorithm in 4.5. +`argon2id`: This provides the highest level of security and is recommended for deployments that do not require frequent password verification. However, it is more CPU intensive, and may not be suitable for environments with a high frequency of password verifications. + +### Resource and Storage Analytics + +4.5 includes numerous new analytics for resources and storage, including page faults, context switches, free space, disk usage, and other metrics. + +#### Default Replication Port + +The default port for replication has been changed from 9925 to 9933. + +### Property Forwarding + +Record properties on resource instances are now accessible through standard property access syntax, regardless of whether the property was declared in a schema. Previously, only properties declared in a schema were accessible this way. This change allows for more consistent and intuitive access to record properties, regardless of how they were defined. It is still recommended to declare properties in a schema for better performance and documentation. + +### Storage Reclamation + +Harper now includes functionality for automatically trying to clean up and evict non-essential data when storage is running low. When free space drops below 40% (configurable), Harper will start to: + +- Evict older entries from caching tables +- Evict older audit log entries +- Remove older rotated log files + These efforts will become progressively more aggressive as free space decreases. + +### Expanded Sharding Functionality + +When sharding is being used, Harper can now honor write requests whose residency information indicates they should not be written to the local node's table. Harper also now allows nodes to be declaratively configured as part of a shard. + +### Certificate Revocation + +Certificates can now be revoked by configuring nodes with a list of revoked certificate serial numbers. + +### Built-in `loadEnv` Component + +There is a new `loadEnv` component loader that can be used to load environment variables from a `.env` file in a component. + +### Cluster Status Information + +The [`cluster_status` operation](../../../developers/operations-api/clustering) now includes new statistics for replication, including the timestamps of last received transactions, sent transactions, and committed transactions. + +### Improved URL path parsing + +Resources can be defined with nested paths and directly accessed by the exact path without requiring a trailing slash. The `id.property` syntax for accessing properties in URLs will only be applied to properties that are declared in a schema. This allows URLs to generally include dots in paths without being interpreted as property access. A new [`directURLMapping` option/flag](../../../deployments/configuration) on resources allows for more direct URL path handling as well. + +### `server.authenticateUser` API + +In addition to the `server.getUser` API that allows for retrieval of users by username, the `server.authenticateUser` API is now available, which will _always_ verify the user by the provided password.
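+
+A small sketch of how this might look in a custom login resource (the class, endpoint shape, and return value are hypothetical):
+
+```typescript
+// Hypothetical login endpoint that verifies credentials explicitly.
+import { Resource, server } from 'harperdb';
+
+export class Login extends Resource {
+	async post(data: { username: string; password: string }) {
+		// Unlike server.getUser (a lookup by username), authenticateUser
+		// always verifies the supplied password before returning the user.
+		const user = await server.authenticateUser(data.username, data.password);
+		return { authenticated: Boolean(user) };
+	}
+}
+```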
+ +#### Improved Message Delivery + +Message delivery performance has been improved. + +### HTTP/2 + +HarperDB now supports HTTP/2 for all API endpoints. This can be enabled with the `http2` option in the configuration file. + +### `harperdb` symlink + +Using `import from 'harperdb'` will more consistently work when directly running a component locally. + +### Transaction Reuse + +By default, transactions can now be reused after calling `transaction.commit()`. + +### GraphQL configuration + +The GraphQL query endpoint can be configured to listen on different ports. The GraphQL query endpoint is now also disabled by default, to avoid any conflicts. + +### Glob support for components + +Glob file handling for specifying files used by components has been improved for better consistency. + +### Table.getRecordCount + +`Table.getRecordCount()` is now available to get the number of records in a table.
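+
+For instance (a hypothetical sketch; the table name is illustrative):
+
+```typescript
+// Hypothetical component code: read a table's record count.
+import { tables } from 'harperdb';
+
+const { Dog } = tables;
+const count = await Dog.getRecordCount(); // new in 4.5
+console.log(`Dog table holds ${count} records`);
+```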
+ +### Removal of record counts from REST API + +Previously, the root path for a resource in the REST API would return a record count. However, this was a significant performance hazard and was never documented to exist, so it has been removed to ensure better performance and reliability. + +Note that downgrading from 4.5 to 4.4 is _not_ supported. diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.1.md b/docs/technical-details/release-notes/v4-tucker/4.5.1.md new file mode 100644 index 00000000..ec431a8a --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.1.md @@ -0,0 +1,16 @@ +--- +title: 4.5.1 +sidebar_position: 59498 +--- + +### HarperDB 4.5.1 + +3/18/2025 + +- Fix/implementation for sharding data that is written for cache resolution +- Add support for replication.shard in configuration for defining the local node's shard id +- Fix for source map handling in stack traces +- Improved error reporting for syntax errors in component code +- Improved logging on deployment and NPM installation +- Added shard information to cluster_status +- Fix for audit entry eviction when a table is deleted diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.10.md b/docs/technical-details/release-notes/v4-tucker/4.5.10.md new file mode 100644 index 00000000..b74fbadb --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.10.md @@ -0,0 +1,11 @@ +--- +title: 4.5.10 +sidebar_position: 59489 +--- + +### HarperDB 4.5.10 + +5/20/2025 + +- Expose the `resources` map for being able to set and access custom resources +- Fix for cleaning up blob files that are used when a database is deleted diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.11.md b/docs/technical-details/release-notes/v4-tucker/4.5.11.md new file mode 100644 index 00000000..cba2d019 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.11.md @@ -0,0 +1,10 @@ +--- +title: 4.5.11 +sidebar_position: 59488 +--- + +### HarperDB 4.5.11 +6/27/2025 + +* Fix bug (working around a Node.js bug) with assigning ciphers to a server and applying them to TLS connections +* Fix for handling TLS array when checking certificates configuration \ No newline at end of file diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.12.md b/docs/technical-details/release-notes/v4-tucker/4.5.12.md new file mode 100644 index 00000000..6353bfc2 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.12.md @@ -0,0 +1,13 @@ +--- +title: 4.5.12 +sidebar_position: 59487 +--- + +### HarperDB 4.5.12 +7/9/2025 + +- Fix for dynamically setting `harperdb` package symlink on deploy +- Assign shard numbers from each node's config rather than from routes +- Handle certificates without a common name, falling back to the SANs +- Properly clean up blobs that are only transiently used for replication +- Ensure that we always set up server.shards even when there are no TLS connections diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.13.md b/docs/technical-details/release-notes/v4-tucker/4.5.13.md new file mode 100644 index 00000000..2b8a6149 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.13.md @@ -0,0 +1,9 @@ +--- +title: 4.5.13 +sidebar_position: 59486 +--- + +### HarperDB 4.5.13 +7/12/2025 + +- Fix cleaning out audit entries when a blob has been removed diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.14.md b/docs/technical-details/release-notes/v4-tucker/4.5.14.md new file mode 100644 index 00000000..0ad8f235 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.14.md @@ -0,0 +1,9 @@ +--- +title: 4.5.14 +sidebar_position: 59485 +--- + +### HarperDB 4.5.14 +7/15/2025 + +- Use proper back-pressure when copying a table for initial database sync diff --git a/docs/technical-details/release-notes/4.tucker/4.5.15.md
b/docs/technical-details/release-notes/v4-tucker/4.5.15.md similarity index 100% rename from docs/technical-details/release-notes/4.tucker/4.5.15.md rename to docs/technical-details/release-notes/v4-tucker/4.5.15.md diff --git a/docs/technical-details/release-notes/4.tucker/4.5.16.md b/docs/technical-details/release-notes/v4-tucker/4.5.16.md similarity index 100% rename from docs/technical-details/release-notes/4.tucker/4.5.16.md rename to docs/technical-details/release-notes/v4-tucker/4.5.16.md diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.2.md b/docs/technical-details/release-notes/v4-tucker/4.5.2.md new file mode 100644 index 00000000..62468720 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.2.md @@ -0,0 +1,13 @@ +--- +title: 4.5.2 +sidebar_position: 59497 +--- + +### HarperDB 4.5.2 + +3/25/2025 + +- For defined schemas, don't allow updates from remote nodes that could cause conflicts and repeated schema change requests +- New harper-chrome docker container for accessing Chrome binaries for use with tools like Puppeteer +- Improved rolling restart handling of errors with reaching individual nodes +- Defined a cleaner operation object to avoid accidental leaking of credentials with logging diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.3.md b/docs/technical-details/release-notes/v4-tucker/4.5.3.md new file mode 100644 index 00000000..b0878089 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.3.md @@ -0,0 +1,11 @@ +--- +title: 4.5.3 +sidebar_position: 59496 +--- + +### HarperDB 4.5.3 + +4/3/2025 + +- Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key +- Fix for analytics of storage size when tables are deleted diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.4.md b/docs/technical-details/release-notes/v4-tucker/4.5.4.md new file mode 100644 index 00000000..2d334a06 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.4.md @@ -0,0 +1,12 @@ +--- +title: 4.5.4 +sidebar_position: 59495 +--- + +### HarperDB 4.5.4 + +4/11/2025 + +- Fix for replication of (non-retained) published messages +- Make the cookie domain configurable to allow cookies to be shared across sub-hostnames +- Fix for on-demand loading of shared blobs diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.5.md b/docs/technical-details/release-notes/v4-tucker/4.5.5.md new file mode 100644 index 00000000..606f8063 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.5.md @@ -0,0 +1,11 @@ +--- +title: 4.5.5 +sidebar_position: 59494 +--- + +### HarperDB 4.5.5 + +4/15/2025 + +- Updates for better messaging with symlinks on Windows +- Fix for saving replicated blobs diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.6.md b/docs/technical-details/release-notes/v4-tucker/4.5.6.md new file mode 100644 index 00000000..a711a988 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.6.md @@ -0,0 +1,12 @@ +--- +title: 4.5.6 +sidebar_position: 59493 +--- + +### HarperDB 4.5.6 + +4/17/2025 + +- Fix for changing the type of the primary key attribute +- Added a new `includeExpensiveRecordCountEstimates` property to the REST component for returning record count estimates +- Fix for dropping attributes diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.7.md b/docs/technical-details/release-notes/v4-tucker/4.5.7.md new file mode 100644 index 00000000..ce785506 --- /dev/null +++ 
b/docs/technical-details/release-notes/v4-tucker/4.5.7.md @@ -0,0 +1,11 @@ +--- +title: 4.5.7 +sidebar_position: 59492 +--- + +### HarperDB 4.5.7 + +4/23/2025 + +- Fix for handling buffers from replicated sharded blob records to prevent overwriting while in use +- Updated the included studio version with a fix for logging in diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.8.md b/docs/technical-details/release-notes/v4-tucker/4.5.8.md new file mode 100644 index 00000000..32f43190 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.8.md @@ -0,0 +1,13 @@ +--- +title: 4.5.8 +sidebar_position: 59491 +--- + +### HarperDB 4.5.8 + +4/30/2025 + +- Fix MQTT subscription topics with trailing slashes to ensure they are not treated as a wildcard +- Fix the arguments that are used for the default connect/subscribe calls so they pass the second argument from connect like `connect(incomingMessages, query) -> subscribe(query)` +- Add support for replication connections using any configured certificate authorities to verify the server certificates +- Added more descriptive error messages on errors in user residency functions diff --git a/docs/technical-details/release-notes/v4-tucker/4.5.9.md b/docs/technical-details/release-notes/v4-tucker/4.5.9.md new file mode 100644 index 00000000..9d6d13ef --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.5.9.md @@ -0,0 +1,10 @@ +--- +title: 4.5.9 +sidebar_position: 59490 +--- + +### HarperDB 4.5.9 + +5/14/2025 + +- Remove the --no-bin-links directive for NPM that was causing installs of dependencies to fail diff --git a/docs/technical-details/release-notes/v4-tucker/4.6.0.md b/docs/technical-details/release-notes/v4-tucker/4.6.0.md new file mode 100644 index 00000000..3100a9cb --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.6.0.md @@ -0,0 +1,36 @@ +--- +title: 4.6.0 +sidebar_position: 59399 +--- + +# 4.6.0 + +#### HarperDB 4.6.0 + +6/13/2025 + +### Vector Indexing: Hierarchical Navigable Small World + +Harper 4.6 now includes support for vector indexing, which allows for efficient and fast queries on large semantic data sets. Vector indexing is powered by the [Hierarchical Navigable Small World (HNSW) algorithm](https://arxiv.org/abs/1603.09320), can be used to index any vector-valued property, and is particularly useful for vector text-embedding data. This provides powerful, efficient vector-based searching for semantic and AI-based querying functionality. HNSW is a preferred algorithm for vector indexing and searching because it provides an excellent balance of recall and performance. + +### New Extension API with support for dynamic reloading + +4.6 introduces a new extension API with significant ergonomic improvements for creating new extension components that are more robust and dynamic. The new API also provides a mechanism for dynamic reloading of some files and configuration without restarts. + +### Logging Improvements + +4.6 includes significant expansions to logging configurability, allowing for specific logging configurations of individual components. This also leverages the new extension API to allow for dynamic reloading of logging configuration. With the more granular logging, logs can be directed to different files and/or different log levels. +The logger includes support for HTTP logging, with configurability for logging standard HTTP methods and paths as well as headers, ids, and timing information. +The new logger is now based on the Node.js Console API, with improved formatting of log messages for various types of objects. +An important change is that logging to standard out/error will _not_ include the timestamp, and console logging is not written to the log files by default. + + +### Data Loader +4.6 includes a new [data loader](../../../developers/applications/data-loader) that can be used to load data into HarperDB as part of a component. The data loader can be used to load data from a JSON file and can be deployed and distributed with a component to provide a reliable mechanism for ensuring specific records are loaded into Harper.
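+
+As a hypothetical sketch of how a component might ship seed data with the data loader (the exact configuration keys and data file format are defined in the data loader documentation linked above, so treat these as illustrative):
+
+```yaml
+# config.yaml (hypothetical): point the data loader at bundled record files
+dataLoader:
+  files: 'data/*.json'
+```
+
+```json
+[
+	{ "id": 1, "name": "Tucker", "breed": "Labrador Husky mix" },
+	{ "id": 2, "name": "Penny", "breed": "Beagle" }
+]
+```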
+ +### Resource API Upgrades + +4.6 includes an upgraded, opt-in form of the Resource API with significant improvements in ease of use. + +### only-if-cached behavior +Previously, when the `only-if-cached` caching directive was used and the entry was not cached, Harper would return a 504 but still make a request to origin in the background. Now, Harper will no longer make a request to origin for `only-if-cached`.
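+
+A quick sketch of the client-visible behavior (the URL and table are hypothetical; 9926 is Harper's default HTTP port):
+
+```typescript
+// Hypothetical REST request against a caching table.
+const res = await fetch('http://localhost:9926/Product/1', {
+	headers: { 'Cache-Control': 'only-if-cached' },
+});
+// On a cache miss Harper returns 504, and as of 4.6 it no longer
+// makes a background request to the origin.
+if (res.status === 504) {
+	console.log('not cached; origin was not contacted');
+}
+```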
\ No newline at end of file diff --git a/docs/technical-details/release-notes/v4-tucker/4.6.1.md b/docs/technical-details/release-notes/v4-tucker/4.6.1.md new file mode 100644 index 00000000..cf8ccd2c --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.6.1.md @@ -0,0 +1,18 @@ +--- +title: 4.6.1 +sidebar_position: 59398 +--- + +# 4.6.1 +7/10/2025 + +- Plugin API updates to use plugin nomenclature +- Fix for dynamically setting `harperdb` package symlink on deploy +- Assign shard numbers from each node's config rather than from routes +- Handle certificates without a common name, falling back to the SANs +- Properly clean up blobs that are only transiently used for replication +- Ensure that we always set up server.shards even when there are no TLS connections +- Fix for clone node getting the cluster status +- Properly initialize config on CLI operations to avoid path error +- Fix for lmdb compilation on macOS using little-endian +- Allow secure cookies with localhost diff --git a/docs/technical-details/release-notes/v4-tucker/4.6.2.md b/docs/technical-details/release-notes/v4-tucker/4.6.2.md new file mode 100644 index 00000000..579f26df --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/4.6.2.md @@ -0,0 +1,11 @@ +--- +title: 4.6.2 +sidebar_position: 59397 +--- + +# 4.6.2 +7/15/2025 + +- Use proper back-pressure when copying a table for initial database sync +- Fix cleaning out audit entries when a blob has been removed +- Fix for running CLI operations when a Harper DB is not installed \ No newline at end of file diff --git a/docs/technical-details/release-notes/4.tucker/4.6.3.md b/docs/technical-details/release-notes/v4-tucker/4.6.3.md similarity index 100% rename from docs/technical-details/release-notes/4.tucker/4.6.3.md rename to docs/technical-details/release-notes/v4-tucker/4.6.3.md diff --git a/docs/technical-details/release-notes/v4-tucker/index.md b/docs/technical-details/release-notes/v4-tucker/index.md new file mode 100644 index 00000000..299b3dad --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/index.md @@ -0,0 +1,53 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +HarperDB version 4 ([Tucker release](tucker)) represents a major step forward in database technology. This release line has ground-breaking architectural advancements, including: + +## [4.6](4.6.0) + +- Vector Indexing - 4.6 introduces a new Vector Indexing system based on Hierarchical Navigable Small World Graphs. +- New extension API - 4.6 introduces a new extension API for creating extension components. +- Improved logging configurability - Logging can be dynamically updated and specifically configured for each component. +- Resource API - 4.6 has updated Resource APIs for ease of use. +- Data loader - 4.6 introduces a new data loader that allows for ensuring records exist as part of a component. + +## [4.5](4.5.0) + +- Blob Storage - 4.5 introduces a new [Blob storage system](../../reference/blob). +- Password Hashing Upgrade - two new password hashing algorithms for better security (to replace md5). +- New resource and storage analytics + +## [4.4](4.4.0) + +- Native replication (codenamed "Plexus"), which is faster, more efficient, more secure, and more reliable than the previous replication system, and provides provisional sharding capabilities with a foundation for the future +- Computed properties that allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value +- Custom indexing, including composite, full-text, and vector indexing + +## [4.3](4.3.0) + +- Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations +- More advanced transaction support for CRDTs and storage of large integers (with BigInt) +- Better management with a new upgraded local studio and new CLI features + +## [4.2](4.2.0) + +- New component architecture and Resource API for advanced, robust custom database application development +- Real-time capabilities through MQTT, WebSockets, and Server-Sent Events +- REST interface for intuitive, fast, and standards-compliant HTTP interaction +- Native caching capabilities for high-performance cache scenarios +- Clone node functionality + +## [4.1](4.1.0) + +- New streaming iterators mechanism that allows query results to be delivered to clients _while_ query results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery +- New thread-based concurrency model for more efficient resource usage + +## [4.0](4.0.0) + +- New clustering technology that delivers robust, resilient and high-performance replication +- Major storage improvements with a highly-efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities + +Did you know our release names are dedicated to employee pups? For our fourth release, [meet Tucker!](tucker) diff --git a/docs/technical-details/release-notes/v4-tucker/tucker.md b/docs/technical-details/release-notes/v4-tucker/tucker.md new file mode 100644 index 00000000..703d4e63 --- /dev/null +++ b/docs/technical-details/release-notes/v4-tucker/tucker.md @@ -0,0 +1,11 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. + +![picture of grey and white dog](/dogs/tucker.png) + +_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at Harper. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard.
I have very ticklish feet and love belly rubs!_ diff --git a/package.json b/package.json index 708cad5e..14a5efff 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,12 @@ "name": "@harperdb/documentation", "private": true, "scripts": { - "format": "prettier --write 'docs/**/*' package.json" + "format": "prettier --write 'docs/**/*' package.json", + "site:install": "cd site && npm install", + "site:dev": "cd site && npm run start", + "site:build": "cd site && npm run build", + "site:serve": "cd site && npm run serve", + "site:clear": "cd site && npm run clear" }, "devDependencies": { "@harperdb/code-guidelines": "^0.0.2", diff --git a/site/.gitignore b/site/.gitignore new file mode 100644 index 00000000..82c62bae --- /dev/null +++ b/site/.gitignore @@ -0,0 +1,23 @@ +# Dependencies +/node_modules + +# Production +/build + +# Latest (/docs/) is a build time copy of the latest version +/docs + +# Generated files +.docusaurus +.cache-loader + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* diff --git a/site/README.md b/site/README.md new file mode 100644 index 00000000..fc06acbb --- /dev/null +++ b/site/README.md @@ -0,0 +1,109 @@ +# Harper Documentation Site + +This directory contains the Docusaurus configuration and build files for the Harper documentation site. + +## 🚀 Quick Start + +```bash +# Install dependencies +npm install + +# Start development server +npm start +# Opens at http://localhost:3000 + +# Build for production +npm run build + +# Serve production build locally +npm run serve +``` + +## 📁 Directory Structure + +``` +site/ +├── build/ # Production build output +├── src/ # React components and custom pages +│ ├── css/ # Custom styles +│ └── pages/ # Custom pages +├── static/ # Static assets +│ ├── img/ # Images and logos +│ └── js/ # JavaScript files +├── versioned_docs/ # Documentation for previous versions +├── versioned_sidebars/ # Sidebar configurations for versions +├── docusaurus.config.ts # Main Docusaurus configuration +├── sidebars.ts # Sidebar navigation structure +├── redirects.ts # URL redirects configuration +└── versions.json # Version configuration +``` + +## 🛠️ Development + +### Running Locally + +```bash +# Start the development server with hot reload +npm start + +# Clear cache if you encounter issues +npm run clear +``` + +The development server runs at `http://localhost:3000` and automatically reloads when you make changes. + +### Building + +```bash +# Create production build +npm run build + +# Test production build locally +npm run serve +``` + +The production build is optimized and outputs to the `build/` directory. + +## 📋 Cutting a New Version + +When releasing a new version of Harper documentation: + +```bash +# Cut a new version (e.g., 4.7) +npm run version + +# This will: +# 1. Copy current docs to versioned_docs/version-4.7 +# 2. Copy current sidebars to versioned_sidebars +# 3. Update versions.json +``` + +After cutting a version: +1. The current `/docs` becomes the new "next" version +2. The previous latest version is archived +3. 
Update `docusaurus.config.ts` to set the new `lastVersion` + +## 🔧 Configuration + +- **`docusaurus.config.ts`** - Main site configuration (metadata, plugins, themes) +- **`sidebars.ts`** - Documentation navigation structure +- **`redirects.ts`** - URL redirect rules +- **`versions.json`** - Available documentation versions + +## 🔍 Search + +The site includes local search that indexes all documentation content at build time, providing fast client-side search without external dependencies. + +## 📝 Other Commands + +```bash +# Type checking +npm run typecheck + +# Clean all generated files and caches +npm run clear +``` + +## 🚢 Deployment + +The site builds to static HTML/CSS/JS files that can be deployed to any static hosting service. The production build is in the `build/` directory after running `npm run build`. \ No newline at end of file diff --git a/site/docusaurus.config.ts b/site/docusaurus.config.ts new file mode 100644 index 00000000..8a247010 --- /dev/null +++ b/site/docusaurus.config.ts @@ -0,0 +1,235 @@ +import { themes as prismThemes } from 'prism-react-renderer'; +import type { Config } from '@docusaurus/types'; +import type * as Preset from '@docusaurus/preset-classic'; +import { generateRedirects, createRedirects as createRedirectsBase } from './redirects'; + +// This runs in Node.js - Don't use client-side code here (browser APIs, JSX...) + +const scripts = []; + +// `npm run site:build` and `docusaurus build` sets this to 'production' +// `npm run site:dev` and `docusaurus start` sets it to 'development' +if (process.env.NODE_ENV === 'production') { + scripts.push({ src: 'js/reo.js' }); +} + +// Determine base URL from environment variable or use defaults +// For GitHub Pages deployment: DOCUSAURUS_BASE_URL=/documentation/ +// For local development: DOCUSAURUS_BASE_URL=/ (or unset) +// Can also be set via command line: npm run build -- --base-url /documentation/ +const baseUrl = process.env.DOCUSAURUS_BASE_URL || '/'; + +// Determine route base path for docs +// Can be set to '/docs/' if we need docs under a subdirectory +// Default is '/' to serve docs at the root +const routeBasePath = process.env.DOCUSAURUS_ROUTE_BASE_PATH || '/'; + +// URL can also be overridden if needed +const url = process.env.DOCUSAURUS_URL || 'https://docs.harperdb.io'; + +// Always log configuration at build time +console.log('Docusaurus URL config:', { url, baseUrl, routeBasePath }); + +const config: Config = { + title: 'Harper Docs', + tagline: + 'Harper fuses database, cache, messaging, and application functions into a single process — delivering performance and simplicity for data-intensive, latency-sensitive applications.', + favicon: 'img/HarperDogLogo.svg', + + // Future flags, see https://docusaurus.io/docs/api/docusaurus-config#future + future: { + v4: true, // Improve compatibility with the upcoming Docusaurus v4 + }, + + // Set the production url of your site here + url, + // Set the // pathname under which your site is served + baseUrl, + + // Serve images from the repository root or from env var path + staticDirectories: process.env.IMAGES_PATH ? ['static', process.env.IMAGES_PATH] : ['static', '../images'], + + // GitHub pages deployment config. + // If you aren't using GitHub pages, you don't need these. + organizationName: 'HarperDB', // Usually your GitHub org/user name. + projectName: 'documentation', // Usually your repo name. 
+ + onBrokenLinks: 'warn', + onBrokenMarkdownLinks: 'warn', + + presets: [ + [ + '@docusaurus/preset-classic', + { + docs: { + path: '../docs', + sidebarPath: './sidebars.ts', + // Docs are served at the configured route base path + routeBasePath, + editUrl: ({ docPath }) => { + // Find where docs/ starts in the path and use everything from there + const docsIndex = docPath.indexOf('docs/'); + if (docsIndex !== -1) { + const cleanPath = docPath.substring(docsIndex); + // TODO: When implementing versioned docs, this will need to handle version branches + return `https://github.com/HarperDB/documentation/blob/main/${cleanPath}`; + } + // Fallback if docs/ is not found + return `https://github.com/HarperDB/documentation/blob/main/docs/${docPath}`; + }, + lastVersion: '4.6', + includeCurrentVersion: false, + versions: { + '4.6': { + banner: 'none', // No banner for this version + }, + '4.5': { + // No banner for 4.5 as it's still actively maintained. Docusaurus doesn't allow us to set custom + // text for the banner. The only option is to eject/swizzle DocVersionBanner (`npm run swizzle @docusaurus/theme-classic DocVersionBanner -- --eject`) + // and modify the internal rendering logic based on the version number. Cannot even add a new `banner` option without even more hackery. + // Here is a relevant discussion thread: https://github.com/facebook/docusaurus/discussions/7112 — if we really want this, we should look to contribute the feature upstream. + banner: 'none', + }, + }, + remarkPlugins: [[require('@docusaurus/remark-plugin-npm2yarn'), { sync: true }]], + }, + blog: false, + theme: { + customCss: './src/css/custom.css', + }, + } satisfies Preset.Options, + ], + ], + + plugins: [ + [ + '@docusaurus/plugin-client-redirects', + { + redirects: generateRedirects(routeBasePath), + createRedirects: (existingPath: string) => createRedirectsBase(existingPath, routeBasePath), + }, + ], + ], + + themes: [ + [ + require.resolve('@easyops-cn/docusaurus-search-local'), + { + hashed: true, + language: ['en'], + indexDocs: true, + indexBlog: false, + indexPages: false, + docsRouteBasePath: routeBasePath, + highlightSearchTermsOnTargetPage: true, + searchResultLimits: 8, + // Explicitly set the search bar position + searchBarPosition: 'right', + }, + ], + '@docusaurus/theme-mermaid', + ], + + markdown: { + mermaid: true, + }, + + themeConfig: { + // Replace with your project's social card + image: 'img/HarperOpenGraph.jpg', + navbar: { + logo: { + alt: 'Harper Logo', + src: 'img/HarperPrimaryBlk.svg', + srcDark: 'img/HarperPrimaryWht.svg', + href: 'https://www.harpersystems.dev', + }, + items: [ + { + type: 'docSidebar', + sidebarId: 'docsSidebar', + position: 'left', + label: 'Documentation', + }, + { + type: 'docsVersionDropdown', + position: 'right', + dropdownActiveClassDisabled: true, + }, + { + href: 'https://github.com/HarperDB/harperdb', + label: 'GitHub', + position: 'right', + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Documentation', + items: [ + { + label: 'Getting Started', + to: `${routeBasePath}/getting-started`, + }, + // { + // label: 'Developers', + // to: `${routeBasePath}/developers`, + // }, + { + label: 'Administration', + to: `${routeBasePath}/administration`, + }, + ], + }, + { + title: 'Community', + items: [ + { + label: 'Slack', + href: 'https://harperdbcommunity.slack.com', + }, + { + label: 'LinkedIn', + href: 'https://www.linkedin.com/company/harpersystems/', + }, + { + label: 'X (Twitter)', + href: 'https://twitter.com/harperdbio', + }, + ], + }, + { + title: 
'More', + items: [ + { + label: 'Harper Systems', + href: 'https://www.harpersystems.dev', + }, + { + label: 'Blog', + href: 'https://www.harpersystems.dev/blog', + }, + { + label: 'GitHub', + href: 'https://github.com/HarperDB/harperdb', + }, + { + label: 'Contact', + href: 'https://www.harpersystems.dev/contact', + }, + ], + }, + ], + copyright: `Copyright © ${new Date().getFullYear()} HarperDB, Inc.`, + }, + prism: { + theme: prismThemes.github, + darkTheme: prismThemes.dracula, + }, + } satisfies Preset.ThemeConfig, + scripts, +}; + +export default config; diff --git a/site/package-lock.json b/site/package-lock.json new file mode 100644 index 00000000..5ebcdfd6 --- /dev/null +++ b/site/package-lock.json @@ -0,0 +1,19104 @@ +{ + "name": "site", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "site", + "version": "0.0.0", + "dependencies": { + "@docusaurus/core": "^3.8.1", + "@docusaurus/plugin-client-redirects": "^3.8.1", + "@docusaurus/preset-classic": "^3.8.1", + "@docusaurus/remark-plugin-npm2yarn": "^3.8.1", + "@docusaurus/theme-mermaid": "^3.8.1", + "@easyops-cn/docusaurus-search-local": "^0.52.1", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^19.1.1", + "react-dom": "^19.1.1" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.8.1", + "@docusaurus/tsconfig": "^3.8.1", + "@docusaurus/types": "^3.8.1", + "typescript": "^5.9.2" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@algolia/autocomplete-core": { + "version": "1.17.9", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.17.9.tgz", + "integrity": "sha512-O7BxrpLDPJWWHv/DLA9DRFWs+iY1uOJZkqUwjS5HSZAGcl0hIVCQ97LTLewiZmZ402JYUrun+8NqFP+hCknlbQ==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-plugin-algolia-insights": "1.17.9", + "@algolia/autocomplete-shared": "1.17.9" + } + }, + "node_modules/@algolia/autocomplete-plugin-algolia-insights": { + "version": "1.17.9", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.17.9.tgz", + "integrity": "sha512-u1fEHkCbWF92DBeB/KHeMacsjsoI0wFhjZtlCq2ddZbAehshbZST6Hs0Avkc0s+4UyBGbMDnSuXHLuvRWK5iDQ==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-shared": "1.17.9" + }, + "peerDependencies": { + "search-insights": ">= 1 < 3" + } + }, + "node_modules/@algolia/autocomplete-preset-algolia": { + "version": "1.17.9", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.17.9.tgz", + "integrity": "sha512-Na1OuceSJeg8j7ZWn5ssMu/Ax3amtOwk76u4h5J4eK2Nx2KB5qt0Z4cOapCsxot9VcEN11ADV5aUSlQF4RhGjQ==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-shared": "1.17.9" + }, + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/autocomplete-shared": { + "version": "1.17.9", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.17.9.tgz", + "integrity": "sha512-iDf05JDQ7I0b7JEA/9IektxN/80a2MZ1ToohfmNS3rfeuQnIKI3IJlIafD0xu4StbtQTghx9T3Maa97ytkXenQ==", + "license": "MIT", + "peerDependencies": { + "@algolia/client-search": ">= 4.9.1 < 6", + "algoliasearch": ">= 4.9.1 < 6" + } + }, + "node_modules/@algolia/client-abtesting": { + "version": "5.30.0", + "resolved": 
"https://registry.npmjs.org/@algolia/client-abtesting/-/client-abtesting-5.30.0.tgz", + "integrity": "sha512-Q3OQXYlTNqVUN/V1qXX8VIzQbLjP3yrRBO9m6NRe1CBALmoGHh9JrYosEGvfior28+DjqqU3Q+nzCSuf/bX0Gw==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-analytics": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-5.30.0.tgz", + "integrity": "sha512-/b+SAfHjYjx/ZVeVReCKTTnFAiZWOyvYLrkYpeNMraMT6akYRR8eC1AvFcvR60GLG/jytxcJAp42G8nN5SdcLg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-common": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.30.0.tgz", + "integrity": "sha512-tbUgvkp2d20mHPbM0+NPbLg6SzkUh0lADUUjzNCF+HiPkjFRaIW3NGMlESKw5ia4Oz6ZvFzyREquUX6rdkdJcQ==", + "license": "MIT", + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-insights": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/client-insights/-/client-insights-5.30.0.tgz", + "integrity": "sha512-caXuZqJK761m32KoEAEkjkE2WF/zYg1McuGesWXiLSgfxwZZIAf+DljpiSToBUXhoPesvjcLtINyYUzbkwE0iw==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-personalization": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-5.30.0.tgz", + "integrity": "sha512-7K6P7TRBHLX1zTmwKDrIeBSgUidmbj6u3UW/AfroLRDGf9oZFytPKU49wg28lz/yulPuHY0nZqiwbyAxq9V17w==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-query-suggestions": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/client-query-suggestions/-/client-query-suggestions-5.30.0.tgz", + "integrity": "sha512-WMjWuBjYxJheRt7Ec5BFr33k3cV0mq2WzmH9aBf5W4TT8kUp34x91VRsYVaWOBRlxIXI8o/WbhleqSngiuqjLA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/client-search": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.30.0.tgz", + "integrity": "sha512-puc1/LREfSqzgmrOFMY5L/aWmhYOlJ0TTpa245C0ZNMKEkdOkcimFbXTXQ8lZhzh+rlyFgR7cQGNtXJ5H0XgZg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + 
"node_modules/@algolia/events": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==", + "license": "MIT" + }, + "node_modules/@algolia/ingestion": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/@algolia/ingestion/-/ingestion-1.30.0.tgz", + "integrity": "sha512-NfqiIKVgGKTLr6T9F81oqB39pPiEtILTy0z8ujxPKg2rCvI/qQeDqDWFBmQPElCfUTU6kk67QAgMkQ7T6fE+gg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/monitoring": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/@algolia/monitoring/-/monitoring-1.30.0.tgz", + "integrity": "sha512-/eeM3aqLKro5KBZw0W30iIA6afkGa+bcpvEM0NDa92m5t3vil4LOmJI9FkgzfmSkF4368z/SZMOTPShYcaVXjA==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/recommend": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-5.30.0.tgz", + "integrity": "sha512-iWeAUWqw+xT+2IyUyTqnHCK+cyCKYV5+B6PXKdagc9GJJn6IaPs8vovwoC0Za5vKCje/aXQ24a2Z1pKpc/tdHg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-browser-xhr": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.30.0.tgz", + "integrity": "sha512-alo3ly0tdNLjfMSPz9dmNwYUFHx7guaz5dTGlIzVGnOiwLgIoM6NgA+MJLMcH6e1S7OpmE2AxOy78svlhst2tQ==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-fetch": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-fetch/-/requester-fetch-5.30.0.tgz", + "integrity": "sha512-WOnTYUIY2InllHBy6HHMpGIOo7Or4xhYUx/jkoSK/kPIa1BRoFEHqa8v4pbKHtoG7oLvM2UAsylSnjVpIhGZXg==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@algolia/requester-node-http": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.30.0.tgz", + "integrity": "sha512-uSTUh9fxeHde1c7KhvZKUrivk90sdiDftC+rSKNFKKEU9TiIKAGA7B2oKC+AoMCqMymot1vW9SGbeESQPTZd0w==", + "license": "MIT", + "dependencies": { + "@algolia/client-common": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@antfu/install-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@antfu/install-pkg/-/install-pkg-1.1.0.tgz", + "integrity": "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==", + "license": "MIT", + "dependencies": { + "package-manager-detector": "^1.3.0", + "tinyexec": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@antfu/utils": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/@antfu/utils/-/utils-8.1.1.tgz", + "integrity": "sha512-Mex9nXf9vR6AhcXmMrlz/HVgYYZpVGJ6YlPgwl7UnaFpnshXs6EK/oa5Gpf3CzENMjkvEx2tQtntGnb7UtSTOQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.27.7.tgz", + "integrity": "sha512-xgu/ySj2mTiUFmdE9yCMfBxLp4DHd5DwmbbD05YAuICfodYT3VvRxbrh81LGQ/8UpSdtMdfKMn3KouYDX59DGQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.27.7.tgz", + "integrity": "sha512-BU2f9tlKQ5CAthiMIgpzAh4eDTLWo1mqi9jqE2OxMG0E/OM199VJt2q8BztTxpnSW0i1ymdwLXRJnYzvDM5r2w==", + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.27.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.27.3", + "@babel/helpers": "^7.27.6", + "@babel/parser": "^7.27.7", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.27.7", + "@babel/types": "^7.27.7", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.27.5.tgz", + "integrity": "sha512-ZGhA37l0e/g2s1Cnzdix0O3aLYm66eF8aufiVteOgnwxgnRP8GoyMj7VWsgWnQbVKXyge7hqrFh2K2TQM6t1Hw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.27.5", + "@babel/types": "^7.27.3", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz", + "integrity": 
"sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.3" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.27.1.tgz", + "integrity": "sha512-QwGAmuvM17btKU5VqXfb+Giw4JcN0hjuufz3DYnpeVDvZLAObloM77bhMXiqry3Iio+Ai4phVRDwl6WU10+r5A==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-member-expression-to-functions": "^7.27.1", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/traverse": "^7.27.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.27.1.tgz", + "integrity": "sha512-uVDC72XVf8UbrH5qQTc18Agb8emwjTiZrQE11Nv3CuBEZmVvTwwE9CBUEvHku06gQCAyYf8Nv6ja1IN+6LMbxQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "regexpu-core": "^6.2.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.5.tgz", + "integrity": "sha512-uJnGFcPsWQK8fvjgGP5LZUZZsYGIoPeRjSF5PGwrelYgq7Q15/Ft9NGFp1zglwgIv//W0uG4BevRuSJRyylZPg==", + 
"license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-plugin-utils": "^7.27.1", + "debug": "^4.4.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.22.10" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.27.1.tgz", + "integrity": "sha512-E5chM8eWjTp/aNoVpcbfM7mLxu9XGLWYise2eBKGQomAk/Mb4XoxyqXTZbuTohbsl8EKqdlMhnDI2CCLfcs9wA==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.27.3.tgz", + "integrity": "sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.27.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz", + "integrity": "sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.27.1.tgz", + "integrity": "sha512-7fiA521aVw8lSPeI4ZOD3vRFkoqkJcS+z4hFo82bFSH/2tNd6eJ5qCVMS5OzDmZh/kaHQeBaeyxK6wljcPtveA==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-wrap-function": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.27.1.tgz", + "integrity": "sha512-7EHz6qDZc8RYS5ElPoShMheWvEgERonFCs7IAonWLLUTXW59DP14bCZt89/GKyreYn8g3S83m21FelHKbeDCKA==", + "license": "MIT", + "dependencies": { + 
"@babel/helper-member-expression-to-functions": "^7.27.1", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz", + "integrity": "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.27.1.tgz", + "integrity": "sha512-NFJK2sHUvrjo8wAU/nQTWU890/zB2jj0qBcCbZbbf+005cAsv6tMjXz31fBign6M5ov1o0Bllu+9nbqkfsjjJQ==", + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.1", + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.6.tgz", + "integrity": "sha512-muE8Tt8M22638HU31A3CgfSUciwz1fhATfoVai05aPXGor//CdWDCbnlY1yvBPo07njuVOCNGCSp/GTt12lIug==", + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.7.tgz", + "integrity": "sha512-qnzXzDXdr/po3bOTbTIQZ7+TxNKxpkN5IifVLXS+r7qwynkZfPyjZfE7hCXbo7IoO9TNcSyibgONsf2HauUd3Q==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.7" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.27.1.tgz", + "integrity": "sha512-QPG3C9cCVRQLxAVwmefEmwdTanECuUBMQZ/ym5kiw3XKCGA7qkuQLcjWWHcrD/GKbn/WmJwaezfuuAOcyKlRPA==", + "license": "MIT", + 
"dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-class-field-initializer-scope": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.27.1.tgz", + "integrity": "sha512-qNeq3bCKnGgLkEXUuFry6dPlGfCdQNZbn7yUAPCInwAJHMU7THJfrBSozkcWq5sNM6RcF3S8XyQL2A52KNR9IA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.27.1.tgz", + "integrity": "sha512-g4L7OYun04N1WyqMNjldFwlfPCLVkgB54A/YCXICZYBsvJJE3kByKv9c9+R/nAfmIfjl2rKYLNyMHboYbZaWaA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.27.1.tgz", + "integrity": "sha512-oO02gcONcD5O1iTLi/6frMJBIwWEHceWGSGqrpCmEL8nogiS6J9PBlE48CaK20/Jx1LuRml9aDftLgdjXT8+Cw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-transform-optional-chaining": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.27.1.tgz", + "integrity": "sha512-6BpaYGDavZqkI6yT+KSPdpZFfpnd68UKXbcjI9pJ13pvHhPrCKWOOLp+ysvMeA+DxnhuPpgIaRpxRxo5A9t5jw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": 
"sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.27.1.tgz", + "integrity": "sha512-UT/Jrhw57xg4ILHLFnzFpPDlMbcdEicaAtjPQpbj9wa8T4r5KVWCimHcL/460g8Ht0DMxDyjsLgiWSkVjnwPFg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.27.1.tgz", + "integrity": "sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.27.1.tgz", + "integrity": "sha512-eST9RrwlpaoJBDHShc+DS2SG4ATTi2MYNb4OxYkf3n+7eb49LWpnS+HSpVfW4x927qQwgk8A2hGNVaajAEw0EA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-remap-async-to-generator": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.27.1.tgz", + "integrity": "sha512-NREkZsZVJS4xmTr8qzE5y8AfIPqsdQfRuUiLRTEzb7Qii8iFWCyDKaUV2c0rCuh4ljDZ98ALHP/PetiBV2nddA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-remap-async-to-generator": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.27.1.tgz", + "integrity": "sha512-cnqkuOtZLapWYZUYM5rVIdv1nXYuFVIltZ6ZJ7nIj585QsjKM5dhL2Fu/lICXZ1OyIAFc7Qy+bvDAtTXqGrlhg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.27.5.tgz", + "integrity": "sha512-JF6uE2s67f0y2RZcm2kpAUEbD50vH62TyWVebxwHAlbSdM49VqPz8t4a1uIjp4NIOIZ4xzLfjY5emt/RCyC7TQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.27.1.tgz", + "integrity": "sha512-D0VcalChDMtuRvJIu3U/fwWjf8ZMykz5iZsg77Nuj821vCKI3zCyRLwRdWbsuJ/uRwZhZ002QtCqIkwC/ZkvbA==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.27.1.tgz", + "integrity": "sha512-s734HmYU78MVzZ++joYM+NkJusItbdRcbm+AGRgJCt3iA+yux0QpD9cBVdz3tKyrjVYWRl7j0mHSmv4lhV0aoA==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.27.7.tgz", + 
"integrity": "sha512-CuLkokN1PEZ0Fsjtq+001aog/C2drDK9nTfK/NRK0n6rBin6cBrvM+zfQjDE+UllhR6/J4a6w8Xq9i4yi3mQrw==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1", + "@babel/traverse": "^7.27.7", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.27.1.tgz", + "integrity": "sha512-lj9PGWvMTVksbWiDT2tW68zGS/cyo4AkZ/QTp0sQT0mjPopCmrSkzxeXkznjqBxzDI6TclZhOJbBmbBLjuOZUw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/template": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.27.7.tgz", + "integrity": "sha512-pg3ZLdIKWCP0CrJm0O4jYjVthyBeioVfvz9nwt6o5paUxsgJ/8GucSMAIaj6M7xA4WY+SrvtGu2LijzkdyecWQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.27.1.tgz", + "integrity": "sha512-gEbkDVGRvjj7+T1ivxrfgygpT7GUd4vmODtYpbs0gZATdkX8/iSnOtZSxiZnsgm1YjTgjI6VKBGSJJevkrclzw==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.27.1.tgz", + "integrity": "sha512-MTyJk98sHvSs+cvZ4nOauwTTG1JeonDjSGvGGUNHreGQns+Mpt6WX/dVzWBHgg+dYZhkC4X+zTDfkTU+Vy9y7Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-named-capturing-groups-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.27.1.tgz", + "integrity": "sha512-hkGcueTEzuhB30B3eJCbCYeCaaEQOmQR0AdvzpD4LoN0GXMWzzGSuRrxR2xTnCrvNbVwK9N6/jQ92GSLfiZWoQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.27.1.tgz", + "integrity": 
"sha512-MHzkWQcEmjzzVW9j2q8LGjwGWpG2mjwaaB0BNQwst3FIjqsg8Ct/mIZlvSPJvfi9y2AC8mi/ktxbFVL9pZ1I4A==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.27.1.tgz", + "integrity": "sha512-uspvXnhHvGKf2r4VVtBpeFnuDWsJLQ6MF6lGJLC89jBR1uoVeqM416AZtTuhTezOfgHicpJQmoD5YUakO/YmXQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.27.1.tgz", + "integrity": "sha512-tQvHWSZ3/jH2xuq/vZDy0jNn+ZdXJeM8gHvX4lnJmsc3+50yPlWdZXIc5ay+umX+2/tJIqHqiEqcJvxlmIvRvQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.27.1.tgz", + "integrity": "sha512-BfbWFFEJFQzLCQ5N8VocnCtA8J1CLkNTe2Ms2wocj75dd6VpiqS5Z5quTYcUoo4Yq+DN0rtikODccuv7RU81sw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.27.1.tgz", + "integrity": "sha512-1bQeydJF9Nr1eBCMMbC+hdwmRlsv5XYOMu03YSWFwNs0HsAmtSxxF1fyuYPqemVldVyFmlCU7w8UE14LupUSZQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.27.1.tgz", + "integrity": "sha512-6WVLVJiTjqcQauBhn1LkICsR2H+zm62I3h9faTDKt1qP4jn2o72tSvqMwtGFKGTpojce0gJs+76eZ2uCHRZh0Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.27.1.tgz", + "integrity": "sha512-0HCFSepIpLTkLcsi86GG3mTUzxV5jpmbv97hTETW3yzrAij8aqlD36toB1D0daVFJM8NK6GvKO0gslVQmm+zZA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.27.1.tgz", + "integrity": "sha512-SJvDs5dXxiae4FbSL1aBJlG4wvl594N6YEVVn9e3JGulwioy6z3oPjx/sQBO3Y4NwUu5HNix6KJ3wBZoewcdbw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.27.1.tgz", + "integrity": "sha512-hqoBX4dcZ1I33jCSWcXrP+1Ku7kdqXf1oeah7ooKOIiAdKQ+uqftgCFNOSzA5AMS2XIHEYeGFg4cKRCdpxzVOQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.27.1.tgz", + "integrity": "sha512-iCsytMg/N9/oFq6n+gFTvUYDZQOMK5kEdeYxmxt91fcJGycfxVP9CnrxoliM0oumFERba2i8ZtwRUCMhvP1LnA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.27.1.tgz", + "integrity": "sha512-OJguuwlTYlN0gBZFRPqwOGNWssZjfIUdS7HMYtN8c1KmwpwHFBwTeFZrg9XZa+DFTitWOW5iTAG7tyCUPsCCyw==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.27.1.tgz", + "integrity": "sha512-w5N1XzsRbc0PQStASMksmUeqECuzKuTJer7kFagK8AXgpCMkeDMO5S+aaFb7A51ZYDF7XI34qsTX+fkHiIm5yA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.27.1.tgz", + "integrity": "sha512-iQBE/xC5BV1OxJbp6WG7jq9IWiD+xxlZhLrdwpPkTX3ydmXdvoCpyfJN7acaIBZaOqTfr76pgzqBJflNbeRK+w==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.27.1.tgz", + "integrity": "sha512-SstR5JYy8ddZvD6MhV0tM/j16Qds4mIpJTOd1Yu9J9pJjH93bxHECF7pgtc28XvkzTD6Pxcm/0Z73Hvk7kb3Ng==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.27.1.tgz", + "integrity": "sha512-f6PiYeqXQ05lYq3TIfIDu/MtliKUbNwkGApPUvyo6+tc7uaR4cPjPe7DFPr15Uyycg2lZU6btZ575CuQoYh7MQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.27.1.tgz", + "integrity": "sha512-aGZh6xMo6q9vq1JGcw58lZ1Z0+i0xB2x0XaauNIUXd6O1xXc3RwoWEBlsTQrY4KQ9Jf0s5rgD6SiNkaUdJegTA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.27.1.tgz", + "integrity": "sha512-fdPKAcujuvEChxDBJ5c+0BTaS6revLV7CJL08e4m3de8qJfNIuCc2nc7XJYOjBoTMJeqSmwXJ0ypE14RCjLwaw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.27.7.tgz", + "integrity": "sha512-201B1kFTWhckclcXpWHc8uUpYziDX/Pl4rxl0ZX0DiCZ3jknwfSUALL3QCYeeXXB37yWxJbo+g+Vfq8pAaHi3w==", + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/plugin-transform-destructuring": "^7.27.7", + "@babel/plugin-transform-parameters": "^7.27.7", + "@babel/traverse": "^7.27.7" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.27.1.tgz", + "integrity": "sha512-SFy8S9plRPbIcxlJ8A6mT/CxFdJx/c04JEctz4jf8YZaVS2px34j7NXRrlGlHkN/M2gnpL37ZpGRGVFLd3l8Ng==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.27.1.tgz", + "integrity": "sha512-txEAEKzYrHEX4xSZN4kJ+OfKXFVSWKB2ZxM9dpcE3wT7smwkNmXo5ORRlVzMVdJbD+Q8ILTgSD7959uj+3Dm3Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.27.1.tgz", + "integrity": "sha512-BQmKPPIuc8EkZgNKsv0X4bPmOoayeu4F1YCwx2/CfmDSXDbp7GnzlUH+/ul5VGfRg1AoFPsrIThlEBj2xb4CAg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.27.7.tgz", + "integrity": "sha512-qBkYTYCb76RRxUM6CcZA5KRu8K4SM8ajzVeUgVdMVO9NN9uI/GaVmBg/WKJJGnNokV9SY8FxNOVWGXzqzUidBg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.27.1.tgz", + "integrity": "sha512-10FVt+X55AjRAYI9BrdISN9/AQWHqldOeZDUoLyif1Kn05a56xVBXb8ZouL8pZ9jem8QpXaOt8TS7RHUIS+GPA==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.27.1.tgz", + "integrity": "sha512-5J+IhqTi1XPa0DXF83jYOaARrX+41gOewWbkPyjMNRDqgOCqdffGh8L3f/Ek5utaEBZExjSAzcyjmV9SSAWObQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.27.1.tgz", + "integrity": "sha512-oThy3BCuCha8kDZ8ZkgOg2exvPYUlprMukKQXI1r1pJ47NCvxfkEy8vK+r/hT9nF0Aa4H1WUPZZjHTFtAhGfmQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-constant-elements": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.27.1.tgz", + "integrity": 
"sha512-edoidOjl/ZxvYo4lSBOQGDSyToYVkTAwyVoa2tkuYTSmjrB1+uAedoL5iROVLXkxH+vRgA7uP4tMg2pUJpZ3Ug==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.27.1.tgz", + "integrity": "sha512-p9+Vl3yuHPmkirRrg021XiP+EETmPMQTLr6Ayjj85RLNEbb3Eya/4VI0vAdzQG9SEAl2Lnt7fy5lZyMzjYoZQQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.27.1.tgz", + "integrity": "sha512-2KH4LWGSrJIkVf5tSiBFYuXDAoWRq2MMwgivCf+93dd0GQi8RXLjKA/0EvRnVV5G0hrHczsquXuD01L8s6dmBw==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.27.1.tgz", + "integrity": "sha512-ykDdF5yI4f1WrAolLqeF3hmYU12j9ntLQl/AOG1HAS21jxyg1Q0/J/tpREuYLfatGdGmXp/3yS0ZA76kOlVq9Q==", + "license": "MIT", + "dependencies": { + "@babel/plugin-transform-react-jsx": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.27.1.tgz", + "integrity": "sha512-JfuinvDOsD9FVMTHpzA/pBLisxpv1aSf+OIV8lgH3MuWrks19R27e6a6DipIg4aX1Zm9Wpb04p8wljfKrVSnPA==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.27.5.tgz", + "integrity": "sha512-uhB8yHerfe3MWnuLAhEbeQ4afVoqv8BQsPqrTv7e/jZ9y00kJL6l9a/f4OWaKxotmjzewfEyXE1vgDJenkQ2/Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regexp-modifiers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.27.1.tgz", + "integrity": "sha512-TtEciroaiODtXvLZv4rmfMhkCv8jx3wgKpL68PuiPh2M4fvz5jhsA7697N1gMvkvr/JTF13DrFYyEbY9U7cVPA==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + 
"@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.27.1.tgz", + "integrity": "sha512-V2ABPHIJX4kC7HegLkYoDpfg9PVmuWy/i6vUM5eGK22bx4YVFD3M5F0QQnWQoDs6AGsUWTVOopBiMFQgHaSkVw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.27.4.tgz", + "integrity": "sha512-D68nR5zxU64EUzV8i7T3R5XP0Xhrou/amNnddsRQssx6GrTLdZl1rLxyjtVZBd+v/NVX4AbTPOB5aU8thAZV1A==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.11.0", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-runtime/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.27.1.tgz", + "integrity": "sha512-N/wH1vcn4oYawbJ13Y/FxcQrWk63jhfNa7jef0ih7PHSIHX2LB7GWE1rkPrOnka9kwMxb6hMl19p7lidA+EHmQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.27.1.tgz", + "integrity": "sha512-kpb3HUqaILBJcRFVhFUs6Trdd4mkrzcGXss+6/mxUd273PfbWqSDHRzMT2234gIg2QYfAjvXLSquP1xECSg09Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.27.1.tgz", + "integrity": "sha512-lhInBO5bi/Kowe2/aLdBAawijx+q1pQzicSgnkB6dUPc1+RC8QmJHKf2OjvU+NZWitguJHEaEmbV6VWEouT58g==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.27.1.tgz", + "integrity": 
"sha512-fBJKiV7F2DxZUkg5EtHKXQdbsbURW3DZKQUWphDum0uRP6eHGGa/He9mc0mypL680pb+e/lDIthRohlv8NCHkg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.1.tgz", + "integrity": "sha512-RiSILC+nRJM7FY5srIyc4/fGIwUhyDuuBSdWn4y6yT6gm652DpCHZjIipgn6B7MQ1ITOUnAKWixEUjQRIBIcLw==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.27.1.tgz", + "integrity": "sha512-Q5sT5+O4QUebHdbwKedFBEwRLb02zJ7r4A5Gg2hUoLuU3FjdMcyqcywqUrLCaDsFCxzokf7u9kuy7qz51YUuAg==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.27.1.tgz", + "integrity": "sha512-Ysg4v6AmF26k9vpfFuTZg8HRfVWzsh1kVfowA23y9j/Gu6dOuahdUVhkLqpObp3JIv27MLSii6noRnuKN8H0Mg==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.27.1.tgz", + "integrity": "sha512-uW20S39PnaTImxp39O5qFlHLS9LJEmANjMG7SxIhap8rCHqu0Ik+tLEPX5DKmHn6CsWQ7j3lix2tFOa5YtL12Q==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.27.1.tgz", + "integrity": "sha512-xvINq24TRojDuyt6JGtHmkVkrfVV3FPT16uytxImLeBZqW3/H52yN+kM1MGuyPkIQxrzKwPHs5U/MP3qKyzkGw==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.27.1.tgz", + "integrity": "sha512-EtkOujbc4cgvb0mlpQefi4NTPBzhSIevblFevACNLUspmrALgmEBdL/XfnyyITfd8fKBZrZys92zOWcik7j9Tw==", + "license": "MIT", + 
"dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.27.2.tgz", + "integrity": "sha512-Ma4zSuYSlGNRlCLO+EAzLnCmJK2vdstgv+n7aUP+/IKZrOfWHOJVdSJtuub8RzHTj3ahD37k5OKJWvzf16TQyQ==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.27.1", + "@babel/plugin-bugfix-safari-class-field-initializer-scope": "^7.27.1", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.27.1", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.27.1", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.27.1", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-import-assertions": "^7.27.1", + "@babel/plugin-syntax-import-attributes": "^7.27.1", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.27.1", + "@babel/plugin-transform-async-generator-functions": "^7.27.1", + "@babel/plugin-transform-async-to-generator": "^7.27.1", + "@babel/plugin-transform-block-scoped-functions": "^7.27.1", + "@babel/plugin-transform-block-scoping": "^7.27.1", + "@babel/plugin-transform-class-properties": "^7.27.1", + "@babel/plugin-transform-class-static-block": "^7.27.1", + "@babel/plugin-transform-classes": "^7.27.1", + "@babel/plugin-transform-computed-properties": "^7.27.1", + "@babel/plugin-transform-destructuring": "^7.27.1", + "@babel/plugin-transform-dotall-regex": "^7.27.1", + "@babel/plugin-transform-duplicate-keys": "^7.27.1", + "@babel/plugin-transform-duplicate-named-capturing-groups-regex": "^7.27.1", + "@babel/plugin-transform-dynamic-import": "^7.27.1", + "@babel/plugin-transform-exponentiation-operator": "^7.27.1", + "@babel/plugin-transform-export-namespace-from": "^7.27.1", + "@babel/plugin-transform-for-of": "^7.27.1", + "@babel/plugin-transform-function-name": "^7.27.1", + "@babel/plugin-transform-json-strings": "^7.27.1", + "@babel/plugin-transform-literals": "^7.27.1", + "@babel/plugin-transform-logical-assignment-operators": "^7.27.1", + "@babel/plugin-transform-member-expression-literals": "^7.27.1", + "@babel/plugin-transform-modules-amd": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-modules-systemjs": "^7.27.1", + "@babel/plugin-transform-modules-umd": "^7.27.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.27.1", + "@babel/plugin-transform-new-target": "^7.27.1", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.27.1", + "@babel/plugin-transform-numeric-separator": "^7.27.1", + "@babel/plugin-transform-object-rest-spread": "^7.27.2", + "@babel/plugin-transform-object-super": "^7.27.1", + "@babel/plugin-transform-optional-catch-binding": "^7.27.1", + "@babel/plugin-transform-optional-chaining": "^7.27.1", + "@babel/plugin-transform-parameters": "^7.27.1", + "@babel/plugin-transform-private-methods": "^7.27.1", + "@babel/plugin-transform-private-property-in-object": "^7.27.1", + 
"@babel/plugin-transform-property-literals": "^7.27.1", + "@babel/plugin-transform-regenerator": "^7.27.1", + "@babel/plugin-transform-regexp-modifiers": "^7.27.1", + "@babel/plugin-transform-reserved-words": "^7.27.1", + "@babel/plugin-transform-shorthand-properties": "^7.27.1", + "@babel/plugin-transform-spread": "^7.27.1", + "@babel/plugin-transform-sticky-regex": "^7.27.1", + "@babel/plugin-transform-template-literals": "^7.27.1", + "@babel/plugin-transform-typeof-symbol": "^7.27.1", + "@babel/plugin-transform-unicode-escapes": "^7.27.1", + "@babel/plugin-transform-unicode-property-regex": "^7.27.1", + "@babel/plugin-transform-unicode-regex": "^7.27.1", + "@babel/plugin-transform-unicode-sets-regex": "^7.27.1", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.11.0", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.40.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.27.1.tgz", + "integrity": "sha512-oJHWh2gLhU9dW9HHr42q0cI0/iHHXTLGe39qvpAZZzagHy0MzYLCnCVV0symeRvzmjHyVU7mw2K06E6u/JwbhA==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-transform-react-display-name": "^7.27.1", + "@babel/plugin-transform-react-jsx": "^7.27.1", + "@babel/plugin-transform-react-jsx-development": "^7.27.1", + "@babel/plugin-transform-react-pure-annotations": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.27.1.tgz", + "integrity": "sha512-l7WfQfX0WK4M0v2RudjuQK4u99BS6yLHYEmdtVPP7lKV013zr9DygFuWNlnbvQ9LR+LS0Egz/XAvGx5U9MX0fQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-typescript": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz", + "integrity": 
"sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.27.6.tgz", + "integrity": "sha512-vDVrlmRAY8z9Ul/HxT+8ceAru95LQgkSKiXkSYZvqtbkPSfhZJgpRp45Cldbh1GJ1kxzQkI70AqyrTI58KpaWQ==", + "license": "MIT", + "dependencies": { + "core-js-pure": "^3.30.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.7.tgz", + "integrity": "sha512-X6ZlfR/O/s5EQ/SnUSLzr+6kGnkg8HXGMzpgsMsrJVcfDtH1vIp6ctCN4eZ1LS5c0+te5Cb6Y514fASjMRJ1nw==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.27.5", + "@babel/parser": "^7.27.7", + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.7", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.27.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.7.tgz", + "integrity": "sha512-8OLQgDScAOHXnAz2cV+RfzzNMipuLVBz2biuAJFMV9bfkNf393je3VM8CLkjQodW5+iWsSJdSgSWT6rsZoXHPw==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@braintree/sanitize-url": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-7.1.1.tgz", + "integrity": "sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==", + "license": "MIT" + }, + "node_modules/@chevrotain/cst-dts-gen": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/cst-dts-gen/-/cst-dts-gen-11.0.3.tgz", + "integrity": "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/gast": "11.0.3", + "@chevrotain/types": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/@chevrotain/gast": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/gast/-/gast-11.0.3.tgz", + "integrity": "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/types": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/@chevrotain/regexp-to-ast": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/regexp-to-ast/-/regexp-to-ast-11.0.3.tgz", + "integrity": "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/types": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/types/-/types-11.0.3.tgz", + "integrity": 
"sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==", + "license": "Apache-2.0" + }, + "node_modules/@chevrotain/utils": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/@chevrotain/utils/-/utils-11.0.3.tgz", + "integrity": "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==", + "license": "Apache-2.0" + }, + "node_modules/@colors/colors": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", + "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/@csstools/cascade-layer-name-parser": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-2.0.5.tgz", + "integrity": "sha512-p1ko5eHgV+MgXFVa4STPKpvPxr6ReS8oS2jzTukjR74i5zJNyWO1ZM1m8YKBXnzDKWfBN1ztLYlHxbVemDD88A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/color-helpers": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.0.2.tgz", + "integrity": "sha512-JqWH1vsgdGcw2RR6VliXXdA0/59LttzlU8UlRT/iUUsEeWfYq8I+K0yhihEUTTHLRm1EXvpsCx3083EU15ecsA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.0.10.tgz", + "integrity": "sha512-TiJ5Ajr6WRd1r8HSiwJvZBiJOqtH86aHpUjq5aEKWHiII2Qfjqd/HCWKPOW8EP4vcspXbHnXrwIDlu5savQipg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.0.2", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": 
"sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/media-query-list-parser": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-4.0.3.tgz", + "integrity": "sha512-HAYH7d3TLRHDOUQK4mZKf9k9Ph/m8Akstg66ywKR4SFAigjs3yBiUeZtFxywiTm5moZMAp/5W/ZuFnNXXYLuuQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/postcss-cascade-layers": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/@csstools/postcss-cascade-layers/-/postcss-cascade-layers-5.0.2.tgz", + "integrity": "sha512-nWBE08nhO8uWl6kSAeCx4im7QfVko3zLrtgWZY4/bP87zrSPpSyN/3W3TDqz1jJuH+kbKOHXg5rJnK+ZVYcFFg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-cascade-layers/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/@csstools/postcss-cascade-layers/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@csstools/postcss-color-function": { + "version": "4.0.10", + "resolved": 
"https://registry.npmjs.org/@csstools/postcss-color-function/-/postcss-color-function-4.0.10.tgz", + "integrity": "sha512-4dY0NBu7NVIpzxZRgh/Q/0GPSz/jLSw0i/u3LTUor0BkQcz/fNhN10mSWBDsL0p9nDb0Ky1PD6/dcGbhACuFTQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-color-mix-function": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-function/-/postcss-color-mix-function-3.0.10.tgz", + "integrity": "sha512-P0lIbQW9I4ShE7uBgZRib/lMTf9XMjJkFl/d6w4EMNHu2qvQ6zljJGEcBkw/NsBtq/6q3WrmgxSS8kHtPMkK4Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-color-mix-variadic-function-arguments": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-variadic-function-arguments/-/postcss-color-mix-variadic-function-arguments-1.0.0.tgz", + "integrity": "sha512-Z5WhouTyD74dPFPrVE7KydgNS9VvnjB8qcdes9ARpCOItb4jTnm7cHp4FhxCRUoyhabD0WVv43wbkJ4p8hLAlQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-content-alt-text": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@csstools/postcss-content-alt-text/-/postcss-content-alt-text-2.0.6.tgz", + "integrity": "sha512-eRjLbOjblXq+byyaedQRSrAejKGNAFued+LcbzT+LCL78fabxHkxYjBbxkroONxHHYu2qxhFK2dBStTLPG3jpQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-exponential-functions": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-exponential-functions/-/postcss-exponential-functions-2.0.9.tgz", + 
"integrity": "sha512-abg2W/PI3HXwS/CZshSa79kNWNZHdJPMBXeZNyPQFbbj8sKO3jXxOt/wF7juJVjyDTc6JrvaUZYFcSBZBhaxjw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-font-format-keywords": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-font-format-keywords/-/postcss-font-format-keywords-4.0.0.tgz", + "integrity": "sha512-usBzw9aCRDvchpok6C+4TXC57btc4bJtmKQWOHQxOVKen1ZfVqBUuCZ/wuqdX5GHsD0NRSr9XTP+5ID1ZZQBXw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-gamut-mapping": { + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/@csstools/postcss-gamut-mapping/-/postcss-gamut-mapping-2.0.10.tgz", + "integrity": "sha512-QDGqhJlvFnDlaPAfCYPsnwVA6ze+8hhrwevYWlnUeSjkkZfBpcCO42SaUD8jiLlq7niouyLgvup5lh+f1qessg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-gradients-interpolation-method": { + "version": "5.0.10", + "resolved": "https://registry.npmjs.org/@csstools/postcss-gradients-interpolation-method/-/postcss-gradients-interpolation-method-5.0.10.tgz", + "integrity": "sha512-HHPauB2k7Oits02tKFUeVFEU2ox/H3OQVrP3fSOKDxvloOikSal+3dzlyTZmYsb9FlY9p5EUpBtz0//XBmy+aw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-hwb-function": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@csstools/postcss-hwb-function/-/postcss-hwb-function-4.0.10.tgz", + "integrity": "sha512-nOKKfp14SWcdEQ++S9/4TgRKchooLZL0TUFdun3nI4KPwCjETmhjta1QT4ICQcGVWQTvrsgMM/aLB5We+kMHhQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + 
"@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-ic-unit": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@csstools/postcss-ic-unit/-/postcss-ic-unit-4.0.2.tgz", + "integrity": "sha512-lrK2jjyZwh7DbxaNnIUjkeDmU8Y6KyzRBk91ZkI5h8nb1ykEfZrtIVArdIjX4DHMIBGpdHrgP0n4qXDr7OHaKA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-initial": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-initial/-/postcss-initial-2.0.1.tgz", + "integrity": "sha512-L1wLVMSAZ4wovznquK0xmC7QSctzO4D0Is590bxpGqhqjboLXYA16dWZpfwImkdOgACdQ9PqXsuRroW6qPlEsg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-is-pseudo-class/-/postcss-is-pseudo-class-5.0.3.tgz", + "integrity": "sha512-jS/TY4SpG4gszAtIg7Qnf3AS2pjcUM5SzxpApOrlndMeGhIbaTzWBzzP/IApXoNWEW7OhcjkRT48jnAUIFXhAQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/@csstools/postcss-is-pseudo-class/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@csstools/postcss-light-dark-function": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-light-dark-function/-/postcss-light-dark-function-2.0.9.tgz", + "integrity": 
"sha512-1tCZH5bla0EAkFAI2r0H33CDnIBeLUaJh1p+hvvsylJ4svsv2wOmJjJn+OXwUZLXef37GYbRIVKX+X+g6m+3CQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-float-and-clear": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-float-and-clear/-/postcss-logical-float-and-clear-3.0.0.tgz", + "integrity": "sha512-SEmaHMszwakI2rqKRJgE+8rpotFfne1ZS6bZqBoQIicFyV+xT1UF42eORPxJkVJVrH9C0ctUgwMSn3BLOIZldQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-overflow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-overflow/-/postcss-logical-overflow-2.0.0.tgz", + "integrity": "sha512-spzR1MInxPuXKEX2csMamshR4LRaSZ3UXVaRGjeQxl70ySxOhMpP2252RAFsg8QyyBXBzuVOOdx1+bVO5bPIzA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-overscroll-behavior": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-overscroll-behavior/-/postcss-logical-overscroll-behavior-2.0.0.tgz", + "integrity": "sha512-e/webMjoGOSYfqLunyzByZj5KKe5oyVg/YSbie99VEaSDE2kimFm0q1f6t/6Jo+VVCQ/jbe2Xy+uX+C4xzWs4w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-resize": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-resize/-/postcss-logical-resize-3.0.0.tgz", + "integrity": "sha512-DFbHQOFW/+I+MY4Ycd/QN6Dg4Hcbb50elIJCfnwkRTCX05G11SwViI5BbBlg9iHRl4ytB7pmY5ieAFk3ws7yyg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-logical-viewport-units": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-viewport-units/-/postcss-logical-viewport-units-3.0.4.tgz", + "integrity": "sha512-q+eHV1haXA4w9xBwZLKjVKAWn3W2CMqmpNpZUk5kRprvSiBEGMgrNH3/sJZ8UA3JgyHaOt3jwT9uFa4wLX4EqQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + 
"url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-media-minmax": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-media-minmax/-/postcss-media-minmax-2.0.9.tgz", + "integrity": "sha512-af9Qw3uS3JhYLnCbqtZ9crTvvkR+0Se+bBqSr7ykAnl9yKhk6895z9rf+2F4dClIDJWxgn0iZZ1PSdkhrbs2ig==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/media-query-list-parser": "^4.0.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-media-queries-aspect-ratio-number-values": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/postcss-media-queries-aspect-ratio-number-values/-/postcss-media-queries-aspect-ratio-number-values-3.0.5.tgz", + "integrity": "sha512-zhAe31xaaXOY2Px8IYfoVTB3wglbJUVigGphFLj6exb7cjZRH9A6adyE22XfFK3P2PzwRk0VDeTJmaxpluyrDg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/media-query-list-parser": "^4.0.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-nested-calc": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-nested-calc/-/postcss-nested-calc-4.0.0.tgz", + "integrity": "sha512-jMYDdqrQQxE7k9+KjstC3NbsmC063n1FTPLCgCRS2/qHUbHM0mNy9pIn4QIiQGs9I/Bg98vMqw7mJXBxa0N88A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-normalize-display-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.0.tgz", + "integrity": "sha512-HlEoG0IDRoHXzXnkV4in47dzsxdsjdz6+j7MLjaACABX2NfvjFS6XVAnpaDyGesz9gK2SC7MbNwdCHusObKJ9Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-oklab-function": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@csstools/postcss-oklab-function/-/postcss-oklab-function-4.0.10.tgz", + "integrity": "sha512-ZzZUTDd0fgNdhv8UUjGCtObPD8LYxMH+MJsW9xlZaWTV8Ppr4PtxlHYNMmF4vVWGl0T6f8tyWAKjoI6vePSgAg==", + "funding": [ + 
{ + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-progressive-custom-properties": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-4.1.0.tgz", + "integrity": "sha512-YrkI9dx8U4R8Sz2EJaoeD9fI7s7kmeEBfmO+UURNeL6lQI7VxF6sBE+rSqdCBn4onwqmxFdBU3lTwyYb/lCmxA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-random-function": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-random-function/-/postcss-random-function-2.0.1.tgz", + "integrity": "sha512-q+FQaNiRBhnoSNo+GzqGOIBKoHQ43lYz0ICrV+UudfWnEF6ksS6DsBIJSISKQT2Bvu3g4k6r7t0zYrk5pDlo8w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-relative-color-syntax": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@csstools/postcss-relative-color-syntax/-/postcss-relative-color-syntax-3.0.10.tgz", + "integrity": "sha512-8+0kQbQGg9yYG8hv0dtEpOMLwB9M+P7PhacgIzVzJpixxV4Eq9AUQtQw8adMmAJU1RBBmIlpmtmm3XTRd/T00g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-scope-pseudo-class": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-scope-pseudo-class/-/postcss-scope-pseudo-class-4.0.1.tgz", + "integrity": "sha512-IMi9FwtH6LMNuLea1bjVMQAsUhFxJnyLSgOp/cpv5hrzWmrUYU5fm0EguNDIIOHUqzXode8F/1qkC/tEo/qN8Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + 
"node_modules/@csstools/postcss-scope-pseudo-class/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@csstools/postcss-sign-functions": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-sign-functions/-/postcss-sign-functions-1.1.4.tgz", + "integrity": "sha512-P97h1XqRPcfcJndFdG95Gv/6ZzxUBBISem0IDqPZ7WMvc/wlO+yU0c5D/OCpZ5TJoTt63Ok3knGk64N+o6L2Pg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-stepped-value-functions": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-stepped-value-functions/-/postcss-stepped-value-functions-4.0.9.tgz", + "integrity": "sha512-h9btycWrsex4dNLeQfyU3y3w40LMQooJWFMm/SK9lrKguHDcFl4VMkncKKoXi2z5rM9YGWbUQABI8BT2UydIcA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-text-decoration-shorthand": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@csstools/postcss-text-decoration-shorthand/-/postcss-text-decoration-shorthand-4.0.2.tgz", + "integrity": "sha512-8XvCRrFNseBSAGxeaVTaNijAu+FzUvjwFXtcrynmazGb/9WUdsPCpBX+mHEHShVRq47Gy4peYAoxYs8ltUnmzA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/color-helpers": "^5.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-trigonometric-functions": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@csstools/postcss-trigonometric-functions/-/postcss-trigonometric-functions-4.0.9.tgz", + "integrity": "sha512-Hnh5zJUdpNrJqK9v1/E3BbrQhaDTj5YiX7P61TOvUhoDHnUmsNNxcDAgkQ32RrcWx9GVUvfUNPcUkn8R3vIX6A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-calc": "^2.1.4", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-unset-value": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/@csstools/postcss-unset-value/-/postcss-unset-value-4.0.0.tgz", + "integrity": "sha512-cBz3tOCI5Fw6NIFEwU3RiwK6mn3nKegjpJuzCndoGq3BZPkUjnsq7uQmIeMNeMbMk7YD2MfKcgCpZwX5jyXqCA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/utilities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@csstools/utilities/-/utilities-2.0.0.tgz", + "integrity": "sha512-5VdOr0Z71u+Yp3ozOx8T11N703wIFGVRgOWbOZMKgglPJsWA54MRIoMNVMa7shUToIhx5J8vX4sOZgD2XiihiQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docsearch/css": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.9.0.tgz", + "integrity": "sha512-cQbnVbq0rrBwNAKegIac/t6a8nWoUAn8frnkLFW6YARaRmAQr5/Eoe6Ln2fqkUCZ40KpdrKbpSAmgrkviOxuWA==", + "license": "MIT" + }, + "node_modules/@docsearch/react": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.9.0.tgz", + "integrity": "sha512-mb5FOZYZIkRQ6s/NWnM98k879vu5pscWqTLubLFBO87igYYT4VzVazh4h5o/zCvTIZgEt3PvsCOMOswOUo9yHQ==", + "license": "MIT", + "dependencies": { + "@algolia/autocomplete-core": "1.17.9", + "@algolia/autocomplete-preset-algolia": "1.17.9", + "@docsearch/css": "3.9.0", + "algoliasearch": "^5.14.2" + }, + "peerDependencies": { + "@types/react": ">= 16.8.0 < 20.0.0", + "react": ">= 16.8.0 < 20.0.0", + "react-dom": ">= 16.8.0 < 20.0.0", + "search-insights": ">= 1 < 3" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "search-insights": { + "optional": true + } + } + }, + "node_modules/@docusaurus/babel": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/babel/-/babel-3.8.1.tgz", + "integrity": "sha512-3brkJrml8vUbn9aeoZUlJfsI/GqyFcDgQJwQkmBtclJgWDEQBKKeagZfOgx0WfUQhagL1sQLNW0iBdxnI863Uw==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.25.9", + "@babel/generator": "^7.25.9", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.25.9", + "@babel/preset-env": "^7.25.9", + "@babel/preset-react": "^7.25.9", + "@babel/preset-typescript": "^7.25.9", + "@babel/runtime": "^7.25.9", + "@babel/runtime-corejs3": "^7.25.9", + "@babel/traverse": "^7.25.9", + "@docusaurus/logger": "3.8.1", + "@docusaurus/utils": "3.8.1", + "babel-plugin-dynamic-import-node": "^2.3.3", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/bundler": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/bundler/-/bundler-3.8.1.tgz", + "integrity": 
"sha512-/z4V0FRoQ0GuSLToNjOSGsk6m2lQUG4FRn8goOVoZSRsTrU8YR2aJacX5K3RG18EaX9b+52pN4m1sL3MQZVsQA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.25.9", + "@docusaurus/babel": "3.8.1", + "@docusaurus/cssnano-preset": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "babel-loader": "^9.2.1", + "clean-css": "^5.3.3", + "copy-webpack-plugin": "^11.0.0", + "css-loader": "^6.11.0", + "css-minimizer-webpack-plugin": "^5.0.1", + "cssnano": "^6.1.2", + "file-loader": "^6.2.0", + "html-minifier-terser": "^7.2.0", + "mini-css-extract-plugin": "^2.9.2", + "null-loader": "^4.0.1", + "postcss": "^8.5.4", + "postcss-loader": "^7.3.4", + "postcss-preset-env": "^10.2.1", + "terser-webpack-plugin": "^5.3.9", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "webpack": "^5.95.0", + "webpackbar": "^6.0.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/faster": "*" + }, + "peerDependenciesMeta": { + "@docusaurus/faster": { + "optional": true + } + } + }, + "node_modules/@docusaurus/core": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.8.1.tgz", + "integrity": "sha512-ENB01IyQSqI2FLtOzqSI3qxG2B/jP4gQPahl2C3XReiLebcVh5B5cB9KYFvdoOqOWPyr5gXK4sjgTKv7peXCrA==", + "license": "MIT", + "dependencies": { + "@docusaurus/babel": "3.8.1", + "@docusaurus/bundler": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/mdx-loader": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "boxen": "^6.2.1", + "chalk": "^4.1.2", + "chokidar": "^3.5.3", + "cli-table3": "^0.6.3", + "combine-promises": "^1.1.0", + "commander": "^5.1.0", + "core-js": "^3.31.1", + "detect-port": "^1.5.1", + "escape-html": "^1.0.3", + "eta": "^2.2.0", + "eval": "^0.1.8", + "execa": "5.1.1", + "fs-extra": "^11.1.1", + "html-tags": "^3.3.1", + "html-webpack-plugin": "^5.6.0", + "leven": "^3.1.0", + "lodash": "^4.17.21", + "open": "^8.4.0", + "p-map": "^4.0.0", + "prompts": "^2.4.2", + "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0", + "react-loadable-ssr-addon-v5-slorber": "^1.0.1", + "react-router": "^5.3.4", + "react-router-config": "^5.1.1", + "react-router-dom": "^5.3.4", + "semver": "^7.5.4", + "serve-handler": "^6.1.6", + "tinypool": "^1.0.2", + "tslib": "^2.6.0", + "update-notifier": "^6.0.2", + "webpack": "^5.95.0", + "webpack-bundle-analyzer": "^4.10.2", + "webpack-dev-server": "^4.15.2", + "webpack-merge": "^6.0.1" + }, + "bin": { + "docusaurus": "bin/docusaurus.mjs" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@mdx-js/react": "^3.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/cssnano-preset": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.8.1.tgz", + "integrity": "sha512-G7WyR2N6SpyUotqhGznERBK+x84uyhfMQM2MmDLs88bw4Flom6TY46HzkRkSEzaP9j80MbTN8naiL1fR17WQug==", + "license": "MIT", + "dependencies": { + "cssnano-preset-advanced": "^6.1.2", + "postcss": "^8.5.4", + "postcss-sort-media-queries": "^5.2.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/logger": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.8.1.tgz", + "integrity": 
"sha512-2wjeGDhKcExEmjX8k1N/MRDiPKXGF2Pg+df/bDDPnnJWHXnVEZxXj80d6jcxp1Gpnksl0hF8t/ZQw9elqj2+ww==", + "license": "MIT", + "dependencies": { + "chalk": "^4.1.2", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/mdx-loader": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.8.1.tgz", + "integrity": "sha512-DZRhagSFRcEq1cUtBMo4TKxSNo/W6/s44yhr8X+eoXqCLycFQUylebOMPseHi5tc4fkGJqwqpWJLz6JStU9L4w==", + "license": "MIT", + "dependencies": { + "@docusaurus/logger": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "@mdx-js/mdx": "^3.0.0", + "@slorber/remark-comment": "^1.0.0", + "escape-html": "^1.0.3", + "estree-util-value-to-estree": "^3.0.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "image-size": "^2.0.2", + "mdast-util-mdx": "^3.0.0", + "mdast-util-to-string": "^4.0.0", + "rehype-raw": "^7.0.0", + "remark-directive": "^3.0.0", + "remark-emoji": "^4.0.0", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.0", + "stringify-object": "^3.3.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0", + "url-loader": "^4.1.1", + "vfile": "^6.0.1", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/module-type-aliases": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.8.1.tgz", + "integrity": "sha512-6xhvAJiXzsaq3JdosS7wbRt/PwEPWHr9eM4YNYqVlbgG1hSK3uQDXTVvQktasp3VO6BmfYWPozueLWuj4gB+vg==", + "license": "MIT", + "dependencies": { + "@docusaurus/types": "3.8.1", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "@types/react-router-dom": "*", + "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", + "react-loadable": "npm:@docusaurus/react-loadable@6.0.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/@docusaurus/plugin-client-redirects": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.8.1.tgz", + "integrity": "sha512-F+86R7PBn6VNgy/Ux8w3ZRypJGJEzksbejQKlbTC8u6uhBUhfdXWkDp6qdOisIoW0buY5nLqucvZt1zNJzhJhA==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-blog": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.8.1.tgz", + "integrity": "sha512-vNTpMmlvNP9n3hGEcgPaXyvTljanAKIUkuG9URQ1DeuDup0OR7Ltvoc8yrmH+iMZJbcQGhUJF+WjHLwuk8HSdw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/mdx-loader": "3.8.1", + "@docusaurus/theme-common": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "cheerio": "1.0.0-rc.12", + "feed": "^4.2.2", + "fs-extra": "^11.1.1", + "lodash": 
"^4.17.21", + "schema-dts": "^1.1.2", + "srcset": "^4.0.0", + "tslib": "^2.6.0", + "unist-util-visit": "^5.0.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-docs": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.8.1.tgz", + "integrity": "sha512-oByRkSZzeGNQByCMaX+kif5Nl2vmtj2IHQI2fWjCfCootsdKZDPFLonhIp5s3IGJO7PLUfe0POyw0Xh/RrGXJA==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/mdx-loader": "3.8.1", + "@docusaurus/module-type-aliases": "3.8.1", + "@docusaurus/theme-common": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "@types/react-router-config": "^5.0.7", + "combine-promises": "^1.1.0", + "fs-extra": "^11.1.1", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "schema-dts": "^1.1.2", + "tslib": "^2.6.0", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-content-pages": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.8.1.tgz", + "integrity": "sha512-a+V6MS2cIu37E/m7nDJn3dcxpvXb6TvgdNI22vJX8iUTp8eoMoPa0VArEbWvCxMY/xdC26WzNv4wZ6y0iIni/w==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/mdx-loader": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "fs-extra": "^11.1.1", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-css-cascade-layers": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-css-cascade-layers/-/plugin-css-cascade-layers-3.8.1.tgz", + "integrity": "sha512-VQ47xRxfNKjHS5ItzaVXpxeTm7/wJLFMOPo1BkmoMG4Cuz4nuI+Hs62+RMk1OqVog68Swz66xVPK8g9XTrBKRw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/plugin-debug": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.8.1.tgz", + "integrity": "sha512-nT3lN7TV5bi5hKMB7FK8gCffFTBSsBsAfV84/v293qAmnHOyg1nr9okEw8AiwcO3bl9vije5nsUvP0aRl2lpaw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "fs-extra": "^11.1.1", + "react-json-view-lite": "^2.3.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-analytics": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.8.1.tgz", + "integrity": 
"sha512-Hrb/PurOJsmwHAsfMDH6oVpahkEGsx7F8CWMjyP/dw1qjqmdS9rcV1nYCGlM8nOtD3Wk/eaThzUB5TSZsGz+7Q==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-gtag": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.8.1.tgz", + "integrity": "sha512-tKE8j1cEZCh8KZa4aa80zpSTxsC2/ZYqjx6AAfd8uA8VHZVw79+7OTEP2PoWi0uL5/1Is0LF5Vwxd+1fz5HlKg==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "@types/gtag.js": "^0.0.12", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-google-tag-manager": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.8.1.tgz", + "integrity": "sha512-iqe3XKITBquZq+6UAXdb1vI0fPY5iIOitVjPQ581R1ZKpHr0qe+V6gVOrrcOHixPDD/BUKdYwkxFjpNiEN+vBw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-sitemap": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.8.1.tgz", + "integrity": "sha512-+9YV/7VLbGTq8qNkjiugIelmfUEVkTyLe6X8bWq7K5qPvGXAjno27QAfFq63mYfFFbJc7z+pudL63acprbqGzw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "fs-extra": "^11.1.1", + "sitemap": "^7.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/plugin-svgr": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-svgr/-/plugin-svgr-3.8.1.tgz", + "integrity": "sha512-rW0LWMDsdlsgowVwqiMb/7tANDodpy1wWPwCcamvhY7OECReN3feoFwLjd/U4tKjNY3encj0AJSTxJA+Fpe+Gw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "@svgr/core": "8.1.0", + "@svgr/webpack": "^8.1.0", + "tslib": "^2.6.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/preset-classic": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.8.1.tgz", + "integrity": "sha512-yJSjYNHXD8POMGc2mKQuj3ApPrN+eG0rO1UPgSx7jySpYU+n4WjBikbrA2ue5ad9A7aouEtMWUoiSRXTH/g7KQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/plugin-content-blog": "3.8.1", + "@docusaurus/plugin-content-docs": 
"3.8.1", + "@docusaurus/plugin-content-pages": "3.8.1", + "@docusaurus/plugin-css-cascade-layers": "3.8.1", + "@docusaurus/plugin-debug": "3.8.1", + "@docusaurus/plugin-google-analytics": "3.8.1", + "@docusaurus/plugin-google-gtag": "3.8.1", + "@docusaurus/plugin-google-tag-manager": "3.8.1", + "@docusaurus/plugin-sitemap": "3.8.1", + "@docusaurus/plugin-svgr": "3.8.1", + "@docusaurus/theme-classic": "3.8.1", + "@docusaurus/theme-common": "3.8.1", + "@docusaurus/theme-search-algolia": "3.8.1", + "@docusaurus/types": "3.8.1" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/remark-plugin-npm2yarn": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/remark-plugin-npm2yarn/-/remark-plugin-npm2yarn-3.8.1.tgz", + "integrity": "sha512-lrws3n7jyJrDKqpdFiEX0ZIwVOgGj9z+ZXf0k/rXrftcNh06Y+cEMnNVoNpO3EJKiLDXmTWoxGf/I1qWb/WV6g==", + "license": "MIT", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "npm-to-yarn": "^3.0.0", + "tslib": "^2.6.0", + "unified": "^11.0.3", + "unist-util-visit": "^5.0.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/theme-classic": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.8.1.tgz", + "integrity": "sha512-bqDUCNqXeYypMCsE1VcTXSI1QuO4KXfx8Cvl6rYfY0bhhqN6d2WZlRkyLg/p6pm+DzvanqHOyYlqdPyP0iz+iw==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/mdx-loader": "3.8.1", + "@docusaurus/module-type-aliases": "3.8.1", + "@docusaurus/plugin-content-blog": "3.8.1", + "@docusaurus/plugin-content-docs": "3.8.1", + "@docusaurus/plugin-content-pages": "3.8.1", + "@docusaurus/theme-common": "3.8.1", + "@docusaurus/theme-translations": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "copy-text-to-clipboard": "^3.2.0", + "infima": "0.2.0-alpha.45", + "lodash": "^4.17.21", + "nprogress": "^0.2.0", + "postcss": "^8.5.4", + "prism-react-renderer": "^2.3.0", + "prismjs": "^1.29.0", + "react-router-dom": "^5.3.4", + "rtlcss": "^4.1.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-common": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.8.1.tgz", + "integrity": "sha512-UswMOyTnPEVRvN5Qzbo+l8k4xrd5fTFu2VPPfD6FcW/6qUtVLmJTQCktbAL3KJ0BVXGm5aJXz/ZrzqFuZERGPw==", + "license": "MIT", + "dependencies": { + "@docusaurus/mdx-loader": "3.8.1", + "@docusaurus/module-type-aliases": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router-config": "*", + "clsx": "^2.0.0", + "parse-numeric-range": "^1.3.0", + "prism-react-renderer": "^2.3.0", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-mermaid": { + "version": "3.8.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-mermaid/-/theme-mermaid-3.8.1.tgz", + "integrity": "sha512-IWYqjyTPjkNnHsFFu9+4YkeXS7PD1xI3Bn2shOhBq+f95mgDfWInkpfBN4aYvx4fTT67Am6cPtohRdwh4Tidtg==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.8.1", + "@docusaurus/module-type-aliases": "3.8.1", + "@docusaurus/theme-common": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "mermaid": ">=11.6.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-search-algolia": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.8.1.tgz", + "integrity": "sha512-NBFH5rZVQRAQM087aYSRKQ9yGEK9eHd+xOxQjqNpxMiV85OhJDD4ZGz6YJIod26Fbooy54UWVdzNU0TFeUUUzQ==", + "license": "MIT", + "dependencies": { + "@docsearch/react": "^3.9.0", + "@docusaurus/core": "3.8.1", + "@docusaurus/logger": "3.8.1", + "@docusaurus/plugin-content-docs": "3.8.1", + "@docusaurus/theme-common": "3.8.1", + "@docusaurus/theme-translations": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-validation": "3.8.1", + "algoliasearch": "^5.17.1", + "algoliasearch-helper": "^3.22.6", + "clsx": "^2.0.0", + "eta": "^2.2.0", + "fs-extra": "^11.1.1", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "utility-types": "^3.10.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/theme-translations": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.8.1.tgz", + "integrity": "sha512-OTp6eebuMcf2rJt4bqnvuwmm3NVXfzfYejL+u/Y1qwKhZPrjPoKWfk1CbOP5xH5ZOPkiAsx4dHdQBRJszK3z2g==", + "license": "MIT", + "dependencies": { + "fs-extra": "^11.1.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/tsconfig": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.8.1.tgz", + "integrity": "sha512-XBWCcqhRHhkhfolnSolNL+N7gj3HVE3CoZVqnVjfsMzCoOsuQw2iCLxVVHtO+rePUUfouVZHURDgmqIySsF66A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@docusaurus/types": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.8.1.tgz", + "integrity": "sha512-ZPdW5AB+pBjiVrcLuw3dOS6BFlrG0XkS2lDGsj8TizcnREQg3J8cjsgfDviszOk4CweNfwo1AEELJkYaMUuOPg==", + "license": "MIT", + "dependencies": { + "@mdx-js/mdx": "^3.0.0", + "@types/history": "^4.7.11", + "@types/react": "*", + "commander": "^5.1.0", + "joi": "^17.9.2", + "react-helmet-async": "npm:@slorber/react-helmet-async@1.3.0", + "utility-types": "^3.10.0", + "webpack": "^5.95.0", + "webpack-merge": "^5.9.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/@docusaurus/types/node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "license": "MIT", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@docusaurus/utils": { + "version": "3.8.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/utils/-/utils-3.8.1.tgz", + "integrity": "sha512-P1ml0nvOmEFdmu0smSXOqTS1sxU5tqvnc0dA4MTKV39kye+bhQnjkIKEE18fNOvxjyB86k8esoCIFM3x4RykOQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/logger": "3.8.1", + "@docusaurus/types": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "escape-string-regexp": "^4.0.0", + "execa": "5.1.1", + "file-loader": "^6.2.0", + "fs-extra": "^11.1.1", + "github-slugger": "^1.5.0", + "globby": "^11.1.0", + "gray-matter": "^4.0.3", + "jiti": "^1.20.0", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "micromatch": "^4.0.5", + "p-queue": "^6.6.2", + "prompts": "^2.4.2", + "resolve-pathname": "^3.0.0", + "tslib": "^2.6.0", + "url-loader": "^4.1.1", + "utility-types": "^3.10.0", + "webpack": "^5.88.1" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/utils-common": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.8.1.tgz", + "integrity": "sha512-zTZiDlvpvoJIrQEEd71c154DkcriBecm4z94OzEE9kz7ikS3J+iSlABhFXM45mZ0eN5pVqqr7cs60+ZlYLewtg==", + "license": "MIT", + "dependencies": { + "@docusaurus/types": "3.8.1", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@docusaurus/utils-validation": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.8.1.tgz", + "integrity": "sha512-gs5bXIccxzEbyVecvxg6upTwaUbfa0KMmTj7HhHzc016AGyxH2o73k1/aOD0IFrdCsfJNt37MqNI47s2MgRZMA==", + "license": "MIT", + "dependencies": { + "@docusaurus/logger": "3.8.1", + "@docusaurus/utils": "3.8.1", + "@docusaurus/utils-common": "3.8.1", + "fs-extra": "^11.2.0", + "joi": "^17.9.2", + "js-yaml": "^4.1.0", + "lodash": "^4.17.21", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + } + }, + "node_modules/@easyops-cn/autocomplete.js": { + "version": "0.38.1", + "resolved": "https://registry.npmjs.org/@easyops-cn/autocomplete.js/-/autocomplete.js-0.38.1.tgz", + "integrity": "sha512-drg76jS6syilOUmVNkyo1c7ZEBPcPuK+aJA7AksM5ZIIbV57DMHCywiCr+uHyv8BE5jUTU98j/H7gVrkHrWW3Q==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "immediate": "^3.2.3" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local": { + "version": "0.52.1", + "resolved": "https://registry.npmjs.org/@easyops-cn/docusaurus-search-local/-/docusaurus-search-local-0.52.1.tgz", + "integrity": "sha512-pwfANjTLOQyAPc2Iz93WbG4OQM5C4COCWARbLAs79FIpIS38gHq3PrbDIX8f7oDhGQp1u6f8fr3K3u3+yZXZTA==", + "license": "MIT", + "dependencies": { + "@docusaurus/plugin-content-docs": "^2 || ^3", + "@docusaurus/theme-translations": "^2 || ^3", + "@docusaurus/utils": "^2 || ^3", + "@docusaurus/utils-common": "^2 || ^3", + "@docusaurus/utils-validation": "^2 || ^3", + "@easyops-cn/autocomplete.js": "^0.38.1", + "@node-rs/jieba": "^1.6.0", + "cheerio": "^1.0.0", + "clsx": "^2.1.1", + "comlink": "^4.4.2", + "debug": "^4.2.0", + "fs-extra": "^10.0.0", + "klaw-sync": "^6.0.0", + "lunr": "^2.3.9", + "lunr-languages": "^1.4.0", + "mark.js": "^8.11.1", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "@docusaurus/theme-common": "^2 || ^3", + "react": "^16.14.0 || ^17 || ^18 || ^19", + "react-dom": "^16.14.0 || 17 || ^18 || ^19" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local/node_modules/cheerio": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.1.0.tgz", + "integrity": 
"sha512-+0hMx9eYhJvWbgpKV9hN7jg0JcwydpopZE4hgi+KvQtByZXPp04NiCWU0LzcAbP63abZckIHkTQaXVF52mX3xQ==", + "license": "MIT", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.2.2", + "encoding-sniffer": "^0.2.0", + "htmlparser2": "^10.0.0", + "parse5": "^7.3.0", + "parse5-htmlparser2-tree-adapter": "^7.1.0", + "parse5-parser-stream": "^7.1.2", + "undici": "^7.10.0", + "whatwg-mimetype": "^4.0.0" + }, + "engines": { + "node": ">=18.17" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local/node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@easyops-cn/docusaurus-search-local/node_modules/htmlparser2": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-10.0.0.tgz", + "integrity": "sha512-TwAZM+zE5Tq3lrEHvOlvwgj1XLWQCtaaibSN11Q+gGBAS7Y1uZSWwXXRe4iF6OXnaq1riyQAPFOBtYc77Mxq0g==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.2.1", + "entities": "^6.0.0" + } + }, + "node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@iconify/types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz", + "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==", + "license": "MIT" + }, + "node_modules/@iconify/utils": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@iconify/utils/-/utils-2.3.0.tgz", + "integrity": "sha512-GmQ78prtwYW6EtzXRU1rY+KwOKfz32PD7iJh6Iyqw68GiKuoZ2A6pRtzWONz5VQJbp50mEjXh/7NkumtrAgRKA==", + "license": "MIT", + "dependencies": { + "@antfu/install-pkg": "^1.0.0", + "@antfu/utils": "^8.1.0", + "@iconify/types": "^2.0.0", + "debug": "^4.4.0", + "globals": "^15.14.0", + "kolorist": "^1.8.0", + "local-pkg": "^1.0.0", + "mlly": "^1.7.4" + } + }, + "node_modules/@iconify/utils/node_modules/globals": { + "version": "15.15.0", + "resolved": 
"https://registry.npmjs.org/globals/-/globals-15.15.0.tgz", + "integrity": "sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.11.tgz", + "integrity": "sha512-C512c1ytBTio4MrpWKlJpyFHT6+qfFL8SZ58zBzJ1OOzUEjHeF1BtjY2fH7n4x/g2OV/KiiMLAivOp1DXmiMMw==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.9.tgz", + "integrity": "sha512-amBU75CKOOkcQLfyM6J+DnWwz41yTsWI7o8MQ003LwUIWb4NYX/evAblTx1oBBYJySqL/zHPxHXDw5ewpQaUFw==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.3.tgz", + "integrity": "sha512-AiR5uKpFxP3PjO4R19kQGIMwxyRyPuXmKEEy301V1C0+1rVjS94EZQXf1QKZYN8Q0YM+estSPhmx5JwNftv6nw==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.28", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.28.tgz", + "integrity": "sha512-KNNHHwW3EIp4EDYOvYFGyIFfx36R2dNJYH4knnZlF8T5jdbD5Wx8xmSaQ2gP9URkJ04LGEtlcCtwArKcmFcwKw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@leichtgewicht/ip-codec": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", + "license": "MIT" + }, + "node_modules/@mdx-js/mdx": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.0.tgz", + "integrity": 
"sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "recma-build-jsx": "^1.0.0", + "recma-jsx": "^1.0.0", + "recma-stringify": "^1.0.0", + "rehype-recma": "^1.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@mdx-js/react": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.0.tgz", + "integrity": "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ==", + "license": "MIT", + "dependencies": { + "@types/mdx": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=16", + "react": ">=16" + } + }, + "node_modules/@mermaid-js/parser": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@mermaid-js/parser/-/parser-0.6.2.tgz", + "integrity": "sha512-+PO02uGF6L6Cs0Bw8RpGhikVvMWEysfAyl27qTlroUB8jSWr1lL0Sf6zi78ZxlSnmgSY2AMMKVgghnN9jTtwkQ==", + "license": "MIT", + "dependencies": { + "langium": "3.3.1" + } + }, + "node_modules/@node-rs/jieba": { + "version": "1.10.4", + "resolved": "https://registry.npmjs.org/@node-rs/jieba/-/jieba-1.10.4.tgz", + "integrity": "sha512-GvDgi8MnBiyWd6tksojej8anIx18244NmIOc1ovEw8WKNUejcccLfyu8vj66LWSuoZuKILVtNsOy4jvg3aoxIw==", + "license": "MIT", + "engines": { + "node": ">= 10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "optionalDependencies": { + "@node-rs/jieba-android-arm-eabi": "1.10.4", + "@node-rs/jieba-android-arm64": "1.10.4", + "@node-rs/jieba-darwin-arm64": "1.10.4", + "@node-rs/jieba-darwin-x64": "1.10.4", + "@node-rs/jieba-freebsd-x64": "1.10.4", + "@node-rs/jieba-linux-arm-gnueabihf": "1.10.4", + "@node-rs/jieba-linux-arm64-gnu": "1.10.4", + "@node-rs/jieba-linux-arm64-musl": "1.10.4", + "@node-rs/jieba-linux-x64-gnu": "1.10.4", + "@node-rs/jieba-linux-x64-musl": "1.10.4", + "@node-rs/jieba-wasm32-wasi": "1.10.4", + "@node-rs/jieba-win32-arm64-msvc": "1.10.4", + "@node-rs/jieba-win32-ia32-msvc": "1.10.4", + "@node-rs/jieba-win32-x64-msvc": "1.10.4" + } + }, + "node_modules/@node-rs/jieba-darwin-arm64": { + "version": "1.10.4", + "resolved": "https://registry.npmjs.org/@node-rs/jieba-darwin-arm64/-/jieba-darwin-arm64-1.10.4.tgz", + "integrity": "sha512-G++RYEJ2jo0rxF9626KUy90wp06TRUjAsvY/BrIzEOX/ingQYV/HjwQzNPRR1P1o32a6/U8RGo7zEBhfdybL6w==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": 
"sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pnpm/config.env-replace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz", + "integrity": "sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==", + "license": "MIT", + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz", + "integrity": "sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==", + "license": "MIT", + "dependencies": { + "graceful-fs": "4.2.10" + }, + "engines": { + "node": ">=12.22.0" + } + }, + "node_modules/@pnpm/network.ca-file/node_modules/graceful-fs": { + "version": "4.2.10", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz", + "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", + "license": "ISC" + }, + "node_modules/@pnpm/npm-conf": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz", + "integrity": "sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw==", + "license": "MIT", + "dependencies": { + "@pnpm/config.env-replace": "^1.1.0", + "@pnpm/network.ca-file": "^1.0.1", + "config-chain": "^1.1.11" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@polka/url": { + "version": "1.0.0-next.29", + "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.29.tgz", + "integrity": "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==", + "license": "MIT" + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", + "license": "BSD-3-Clause" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": 
"sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "license": "MIT" + }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/@slorber/remark-comment": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@slorber/remark-comment/-/remark-comment-1.0.0.tgz", + "integrity": "sha512-RCE24n7jsOj1M0UPvIQCHTe7fI0sFL4S2nwKVWwHyVr/wI/H8GosgsJGyhnsZoGFnD/P2hLf1mSbrrgSLN93NA==", + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.1.0", + "micromark-util-symbol": "^1.0.1" + } + }, + "node_modules/@svgr/babel-plugin-add-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-8.0.0.tgz", + "integrity": "sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-dynamic-title": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-8.0.0.tgz", + "integrity": "sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-svg-em-dimensions": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-8.0.0.tgz", + "integrity": "sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-react-native-svg": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-8.1.0.tgz", + "integrity": "sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-plugin-transform-svg-component": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-8.0.0.tgz", + "integrity": "sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/babel-preset": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-8.1.0.tgz", + "integrity": "sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==", + "license": "MIT", + "dependencies": { + "@svgr/babel-plugin-add-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-attribute": "8.0.0", + "@svgr/babel-plugin-remove-jsx-empty-expression": "8.0.0", + "@svgr/babel-plugin-replace-jsx-attribute-value": "8.0.0", + "@svgr/babel-plugin-svg-dynamic-title": "8.0.0", + "@svgr/babel-plugin-svg-em-dimensions": "8.0.0", + "@svgr/babel-plugin-transform-react-native-svg": "8.1.0", + "@svgr/babel-plugin-transform-svg-component": "8.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@svgr/core": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", + "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "camelcase": "^6.2.0", + "cosmiconfig": "^8.1.3", + "snake-case": "^3.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + 
"node_modules/@svgr/hast-util-to-babel-ast": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz", + "integrity": "sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.21.3", + "entities": "^4.4.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@svgr/plugin-jsx": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-8.1.0.tgz", + "integrity": "sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.3", + "@svgr/babel-preset": "8.1.0", + "@svgr/hast-util-to-babel-ast": "8.0.0", + "svg-parser": "^2.0.4" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/plugin-svgo": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-8.1.0.tgz", + "integrity": "sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==", + "license": "MIT", + "dependencies": { + "cosmiconfig": "^8.1.3", + "deepmerge": "^4.3.1", + "svgo": "^3.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + }, + "peerDependencies": { + "@svgr/core": "*" + } + }, + "node_modules/@svgr/webpack": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-8.1.0.tgz", + "integrity": "sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.21.3", + "@babel/plugin-transform-react-constant-elements": "^7.21.3", + "@babel/preset-env": "^7.20.2", + "@babel/preset-react": "^7.18.6", + "@babel/preset-typescript": "^7.21.0", + "@svgr/core": "8.1.0", + "@svgr/plugin-jsx": "8.1.0", + "@svgr/plugin-svgo": "8.1.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/gregberge" + } + }, + "node_modules/@szmarczak/http-timer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-5.0.1.tgz", + "integrity": "sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==", + "license": "MIT", + "dependencies": { + "defer-to-connect": "^2.0.1" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/@trysound/sax": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", + "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==", + "license": "ISC", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.6", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz", + "integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==", + "license": "MIT", + "dependencies": { + "@types/connect": "*", + "@types/node": "*" + } + }, + "node_modules/@types/bonjour": { + "version": "3.5.13", + "resolved": 
"https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.13.tgz", + "integrity": "sha512-z9fJ5Im06zvUL548KvYNecEVlA7cVDkGUi6kZusb04mpyEFKCIZJvloCcmpmLaIahDpOQGHaHmG6imtPMmPXGQ==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.38", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz", + "integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/connect-history-api-fallback": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.4.tgz", + "integrity": "sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==", + "license": "MIT", + "dependencies": { + "@types/express-serve-static-core": "*", + "@types/node": "*" + } + }, + "node_modules/@types/d3": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/d3-axis": "*", + "@types/d3-brush": "*", + "@types/d3-chord": "*", + "@types/d3-color": "*", + "@types/d3-contour": "*", + "@types/d3-delaunay": "*", + "@types/d3-dispatch": "*", + "@types/d3-drag": "*", + "@types/d3-dsv": "*", + "@types/d3-ease": "*", + "@types/d3-fetch": "*", + "@types/d3-force": "*", + "@types/d3-format": "*", + "@types/d3-geo": "*", + "@types/d3-hierarchy": "*", + "@types/d3-interpolate": "*", + "@types/d3-path": "*", + "@types/d3-polygon": "*", + "@types/d3-quadtree": "*", + "@types/d3-random": "*", + "@types/d3-scale": "*", + "@types/d3-scale-chromatic": "*", + "@types/d3-selection": "*", + "@types/d3-shape": "*", + "@types/d3-time": "*", + "@types/d3-time-format": "*", + "@types/d3-timer": "*", + "@types/d3-transition": "*", + "@types/d3-zoom": "*" + } + }, + "node_modules/@types/d3-array": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", + "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==", + "license": "MIT" + }, + "node_modules/@types/d3-axis": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-brush": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-chord": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==", + "license": "MIT" + }, + "node_modules/@types/d3-color": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": 
"sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==", + "license": "MIT" + }, + "node_modules/@types/d3-contour": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", + "license": "MIT", + "dependencies": { + "@types/d3-array": "*", + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==", + "license": "MIT" + }, + "node_modules/@types/d3-dispatch": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.6.tgz", + "integrity": "sha512-4fvZhzMeeuBJYZXRXrRIQnvUYfyXwYmLsdiN7XXmVNQKKw1cM8a5WdID0g1hVFZDqT9ZqZEY5pD44p24VS7iZQ==", + "license": "MIT" + }, + "node_modules/@types/d3-drag": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-dsv": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==", + "license": "MIT" + }, + "node_modules/@types/d3-ease": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==", + "license": "MIT" + }, + "node_modules/@types/d3-fetch": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", + "license": "MIT", + "dependencies": { + "@types/d3-dsv": "*" + } + }, + "node_modules/@types/d3-force": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz", + "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==", + "license": "MIT" + }, + "node_modules/@types/d3-format": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==", + "license": "MIT" + }, + "node_modules/@types/d3-geo": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", + "license": "MIT", + "dependencies": { + "@types/geojson": "*" + } + }, + "node_modules/@types/d3-hierarchy": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==", + "license": "MIT" + }, + "node_modules/@types/d3-interpolate": { + "version": "3.0.4", + "resolved": 
"https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", + "license": "MIT", + "dependencies": { + "@types/d3-color": "*" + } + }, + "node_modules/@types/d3-path": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz", + "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==", + "license": "MIT" + }, + "node_modules/@types/d3-polygon": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==", + "license": "MIT" + }, + "node_modules/@types/d3-quadtree": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==", + "license": "MIT" + }, + "node_modules/@types/d3-random": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==", + "license": "MIT" + }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==", + "license": "MIT" + }, + "node_modules/@types/d3-selection": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz", + "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==", + "license": "MIT" + }, + "node_modules/@types/d3-shape": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz", + "integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==", + "license": "MIT", + "dependencies": { + "@types/d3-path": "*" + } + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, + "node_modules/@types/d3-time-format": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==", + "license": "MIT" + }, + "node_modules/@types/d3-timer": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", + "license": "MIT" + }, + "node_modules/@types/d3-transition": { + "version": "3.0.9", + 
"resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz", + "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==", + "license": "MIT", + "dependencies": { + "@types/d3-selection": "*" + } + }, + "node_modules/@types/d3-zoom": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", + "license": "MIT", + "dependencies": { + "@types/d3-interpolate": "*", + "@types/d3-selection": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "license": "MIT", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "license": "MIT", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "license": "MIT", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "license": "MIT", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.23.tgz", + "integrity": "sha512-Crp6WY9aTYP3qPi2wGDo9iUe/rceX01UMhnF1jmwDcKCFM6cx7YhGP/Mpr3y9AASpfHixIG0E6azCcL5OcDHsQ==", + "license": "MIT", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.0.6.tgz", + "integrity": "sha512-3xhRnjJPkULekpSzgtoNYYcTWgEZkp4myc+Saevii5JPnHNvHMRlBSHDbs7Bh1iPPoVTERHEZXyhyLbMEsExsA==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/express/node_modules/@types/express-serve-static-core": { + "version": "4.19.6", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", + "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "@types/qs": 
"*", + "@types/range-parser": "*", + "@types/send": "*" + } + }, + "node_modules/@types/geojson": { + "version": "7946.0.16", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz", + "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==", + "license": "MIT" + }, + "node_modules/@types/gtag.js": { + "version": "0.0.12", + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==", + "license": "MIT" + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/history": { + "version": "4.7.11", + "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", + "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==", + "license": "MIT" + }, + "node_modules/@types/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==", + "license": "MIT" + }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==", + "license": "MIT" + }, + "node_modules/@types/http-errors": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz", + "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", + "license": "MIT" + }, + "node_modules/@types/http-proxy": { + "version": "1.17.16", + "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.16.tgz", + "integrity": "sha512-sdWoUajOB1cd0A8cRRQ1cfyWNbmFKLAqBB89Y8x5iYyG/mkJHc0YUH8pdWBy2omi9qtCpiIgGjuwO0dQST2l5w==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + 
"version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "license": "MIT" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "license": "MIT", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "license": "MIT" + }, + "node_modules/@types/mime": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", + "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", + "license": "MIT" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "24.0.8", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.0.8.tgz", + "integrity": "sha512-WytNrFSgWO/esSH9NbpWUfTMGQwCGIKfCmNlmFDNiI5gGhgMmEA+V1AEvKLeBNvvtBnailJtkrEa2OIISwrVAA==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.8.0" + } + }, + "node_modules/@types/node-forge": { + "version": "1.3.11", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-1.3.11.tgz", + "integrity": "sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/prismjs": { + "version": "1.26.5", + "resolved": "https://registry.npmjs.org/@types/prismjs/-/prismjs-1.26.5.tgz", + "integrity": "sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ==", + "license": "MIT" + }, + "node_modules/@types/qs": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.14.0.tgz", + "integrity": "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==", + "license": "MIT" + }, + "node_modules/@types/range-parser": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz", + "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "19.1.8", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz", + "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==", + "license": "MIT", + "dependencies": { + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-router": { + "version": "5.1.20", + "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", + "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", + "license": "MIT", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*" + } + }, + 
"node_modules/@types/react-router-config": { + "version": "5.0.11", + "resolved": "https://registry.npmjs.org/@types/react-router-config/-/react-router-config-5.0.11.tgz", + "integrity": "sha512-WmSAg7WgqW7m4x8Mt4N6ZyKz0BubSj/2tVUMsAHp+Yd2AMwcSbeFq9WympT19p5heCFmF97R9eD5uUR/t4HEqw==", + "license": "MIT", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "^5.1.0" + } + }, + "node_modules/@types/react-router-dom": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", + "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", + "license": "MIT", + "dependencies": { + "@types/history": "^4.7.11", + "@types/react": "*", + "@types/react-router": "*" + } + }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", + "license": "MIT" + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/send": { + "version": "0.17.5", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.5.tgz", + "integrity": "sha512-z6F2D3cOStZvuk2SaP6YrwkNO65iTZcwA2ZkSABegdkAh/lf+Aa/YQndZVfmEXT5vgAp6zv06VQ3ejSVjAny4w==", + "license": "MIT", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" + } + }, + "node_modules/@types/serve-index": { + "version": "1.9.4", + "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.4.tgz", + "integrity": "sha512-qLpGZ/c2fhSs5gnYsQxtDEq3Oy8SXPClIXkW5ghvAvsNuVSA8k+gCONcUCS/UjLEYvYps+e8uBtfgXgvhwfNug==", + "license": "MIT", + "dependencies": { + "@types/express": "*" + } + }, + "node_modules/@types/serve-static": { + "version": "1.15.8", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.8.tgz", + "integrity": "sha512-roei0UY3LhpOJvjbIP6ZZFngyLKl5dskOtDhxY5THRSpO+ZI+nzJ+m5yUMzGrp89YRa7lvknKkMYjqQFGwA7Sg==", + "license": "MIT", + "dependencies": { + "@types/http-errors": "*", + "@types/node": "*", + "@types/send": "*" + } + }, + "node_modules/@types/sockjs": { + "version": "0.3.36", + "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.36.tgz", + "integrity": "sha512-MK9V6NzAS1+Ud7JV9lJLFqW85VbC9dq3LmwZCuBe4wBDgKC0Kj/jd8Xl+nSviU+Qc3+m7umHHyHg//2KSa0a0Q==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "license": "MIT", + "optional": true + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "license": "MIT" + }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": 
"sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/yargs": { + "version": "17.0.33", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.33.tgz", + "integrity": "sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==", + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==", + "license": "MIT" + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "license": "ISC" + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": 
"sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "license": "MIT", + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "license": "Apache-2.0", + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", + "license": "MIT" + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + 
"node_modules/@webassemblyjs/wast-printer": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "license": "MIT", + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "license": "BSD-3-Clause" + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "license": "Apache-2.0" + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/accepts/node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/address": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz", + "integrity": 
"sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==", + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/algoliasearch": { + "version": "5.30.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-5.30.0.tgz", + "integrity": "sha512-ILSdPX4je0n5WUKD34TMe57/eqiXUzCIjAsdtLQYhomqOjTtFUg1s6dE7kUegc4Mc43Xr7IXYlMutU9HPiYfdw==", + "license": "MIT", + "dependencies": { + "@algolia/client-abtesting": "5.30.0", + "@algolia/client-analytics": "5.30.0", + "@algolia/client-common": "5.30.0", + "@algolia/client-insights": "5.30.0", + "@algolia/client-personalization": "5.30.0", + "@algolia/client-query-suggestions": "5.30.0", + "@algolia/client-search": "5.30.0", + "@algolia/ingestion": "1.30.0", + "@algolia/monitoring": "1.30.0", + "@algolia/recommend": "5.30.0", + "@algolia/requester-browser-xhr": "5.30.0", + "@algolia/requester-fetch": "5.30.0", + "@algolia/requester-node-http": "5.30.0" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/algoliasearch-helper": { + "version": "3.26.0", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.26.0.tgz", + "integrity": "sha512-Rv2x3GXleQ3ygwhkhJubhhYGsICmShLAiqtUuJTUkr9uOCOXyF2E71LVT4XDnVffbknv8XgScP4U0Oxtgm+hIw==", + "license": "MIT", + "dependencies": { + "@algolia/events": "^4.0.1" + }, + "peerDependencies": { + "algoliasearch": ">= 3.1 < 6" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "license": "ISC", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + 
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-html-community": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz", + "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==", + "engines": [ + "node >= 0.8.0" + ], + "license": "Apache-2.0", + "bin": { + "ansi-html": "bin/ansi-html" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + 
"node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", + "license": "MIT" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "license": "MIT", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.21", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz", + "integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.24.4", + "caniuse-lite": "^1.0.30001702", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/babel-loader": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.2.1.tgz", + "integrity": "sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==", + "license": "MIT", + "dependencies": { + "find-cache-dir": "^4.0.0", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5" + } + }, + "node_modules/babel-plugin-dynamic-import-node": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", + "license": "MIT", + "dependencies": { + "object.assign": "^4.1.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.14", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.14.tgz", + "integrity": "sha512-Co2Y9wX854ts6U8gAAPXfn0GmAyctHuK8n0Yhfjd6t30g7yvKjspvvOo9yG+z52PZRgFErt7Ka2pYnXCjLKEpg==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.27.7", + "@babel/helper-define-polyfill-provider": "^0.6.5", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + 
"node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.11.1.tgz", + "integrity": "sha512-yGCqvBT4rwMczo28xkH/noxJ6MZ4nJfkVYdoDaC/utLtWrXxv27HVrzAeSbqR8SxDsp46n0YF47EbHoixy6rXQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.3", + "core-js-compat": "^3.40.0" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.5.tgz", + "integrity": "sha512-ISqQ2frbiNU9vIJkzg7dlPpznPZ4jOiUQ1uSmB0fEHeowtN3COYRsXr/xexn64NpU13P06jc/L5TgiJXOgrbEg==", + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.5" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/batch": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", + "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==", + "license": "MIT" + }, + "node_modules/big.js": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", + "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/body-parser": { + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.5", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.13.0", + "raw-body": "2.5.2", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/body-parser/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + 
"node_modules/body-parser/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/body-parser/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/bonjour-service": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.3.0.tgz", + "integrity": "sha512-3YuAUiSkWykd+2Azjgyxei8OWf8thdn8AITIog2M4UICzoqfjlqr64WIjEXZllf/W6vK1goqleSR6brGomxQqA==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "multicast-dns": "^7.2.5" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "license": "ISC" + }, + "node_modules/boxen": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz", + "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==", + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^6.2.0", + "chalk": "^4.1.2", + "cli-boxes": "^3.0.0", + "string-width": "^5.0.1", + "type-fest": "^2.5.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.0.1" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.25.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.1.tgz", + "integrity": "sha512-KGj0KoOMXLpSNkkEI6Z6mShmQy0bc1I+T7K9N81k4WWMrfz+6fQ6es80B/YLAeRoKvjYE1YSHHOW1qe9xIVzHw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001726", + "electron-to-chromium": "^1.5.173", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": 
"sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "license": "MIT" + }, + "node_modules/bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cacheable-lookup": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-7.0.0.tgz", + "integrity": "sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==", + "license": "MIT", + "engines": { + "node": ">=14.16" + } + }, + "node_modules/cacheable-request": { + "version": "10.2.14", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-10.2.14.tgz", + "integrity": "sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==", + "license": "MIT", + "dependencies": { + "@types/http-cache-semantics": "^4.0.2", + "get-stream": "^6.0.1", + "http-cache-semantics": "^4.1.1", + "keyv": "^4.5.3", + "mimic-response": "^4.0.0", + "normalize-url": "^8.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camel-case": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz", + "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==", + "license": "MIT", + "dependencies": { + "pascal-case": "^3.1.2", + "tslib": "^2.0.3" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-api": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz", + "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.0.0", + "caniuse-lite": "^1.0.0", + "lodash.memoize": "^4.1.2", + "lodash.uniq": "^4.5.0" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001726", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001726.tgz", + "integrity": "sha512-VQAUIUzBiZ/UnlM28fSp2CRF3ivUn1BWEvxMcVTNwpw91Py1pGbPIyIKtd+tzct9C3ouceCVdGAXxZOpZAsgdw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + 
"resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/cheerio": { + "version": "1.0.0-rc.12", + "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz", + "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==", + "license": "MIT", + "dependencies": { + "cheerio-select": "^2.1.0", + "dom-serializer": "^2.0.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "htmlparser2": "^8.0.1", + "parse5": "^7.0.0", + "parse5-htmlparser2-tree-adapter": "^7.0.0" + }, + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/cheeriojs/cheerio?sponsor=1" + } + }, + "node_modules/cheerio-select": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", + "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-select": "^5.1.0", + "css-what": "^6.1.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/chevrotain": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/chevrotain/-/chevrotain-11.0.3.tgz", + "integrity": "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==", + "license": "Apache-2.0", + "dependencies": { + "@chevrotain/cst-dts-gen": "11.0.3", + "@chevrotain/gast": "11.0.3", + "@chevrotain/regexp-to-ast": "11.0.3", + "@chevrotain/types": "11.0.3", + "@chevrotain/utils": "11.0.3", + "lodash-es": "4.17.21" + } + }, + "node_modules/chevrotain-allstar": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/chevrotain-allstar/-/chevrotain-allstar-0.3.1.tgz", + "integrity": "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==", + "license": "MIT", + "dependencies": { + "lodash-es": "^4.17.21" + }, + "peerDependencies": { + "chevrotain": "^11.0.0" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "license": "MIT", + "engines": { + "node": ">=6.0" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": 
"sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/clean-css": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.3.tgz", + "integrity": "sha512-D5J+kHaVb/wKSFcyyV75uCn8fiY4sV38XJoe4CUyGQ+mOU/fMVYUdH1hJC+CJQ5uY3EnW27SbJYS4X8BiLrAFg==", + "license": "MIT", + "dependencies": { + "source-map": "~0.6.0" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/clean-css/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table3": { + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz", + "integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==", + "license": "MIT", + "dependencies": { + "string-width": "^4.2.0" + }, + "engines": { + "node": "10.* || >= 12.*" + }, + "optionalDependencies": { + "@colors/colors": "1.5.0" + } + }, + "node_modules/cli-table3/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/cli-table3/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "license": "MIT", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { 
+ "version": "2.1.0", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/colord": { + "version": "2.9.3", + "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", + "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", + "license": "MIT" + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "license": "MIT" + }, + "node_modules/combine-promises": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.2.0.tgz", + "integrity": "sha512-VcQB1ziGD0NXrhKxiwyNbCDmRzs/OShMs2GqW2DlU2A/Sd0nQxE1oWDAE5O0ygSx5mgQOn9eIFh7yKPgFRVkPQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/comlink": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/comlink/-/comlink-4.4.2.tgz", + "integrity": "sha512-OxGdvBmJuNKSCMO4NTl1L47VRp6xn2wG4F/2hYzB6tiCb709otOxtEYCSvK80PtjODfXXZu8ds+Nw5kVCjqd2g==", + "license": "Apache-2.0" + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/commander": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", + "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/common-path-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", + "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==", + "license": "ISC" + }, + "node_modules/compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "license": "MIT", + "dependencies": { + "mime-db": ">= 1.43.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compressible/node_modules/mime-db": { + "version": "1.54.0", + "resolved": 
"https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/compression": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.8.1.tgz", + "integrity": "sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "compressible": "~2.0.18", + "debug": "2.6.9", + "negotiator": "~0.6.4", + "on-headers": "~1.1.0", + "safe-buffer": "5.2.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/compression/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/compression/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/compression/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "license": "MIT" + }, + "node_modules/confbox": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.2.2.tgz", + "integrity": "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==", + "license": "MIT" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "license": "MIT", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/config-chain/node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC" + }, + "node_modules/configstore": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-6.0.0.tgz", + "integrity": "sha512-cD31W1v3GqUlQvbBCGcXmd2Nj9SvLDOP1oQ0YFuLETufzSPaKp11rYBsSOm7rCsW3OnIRAFM3OxRhceaXNYHkA==", + "license": "BSD-2-Clause", + "dependencies": { + "dot-prop": "^6.0.1", + "graceful-fs": "^4.2.6", + "unique-string": "^3.0.0", + "write-file-atomic": "^3.0.3", + "xdg-basedir": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/yeoman/configstore?sponsor=1" + } + }, + "node_modules/connect-history-api-fallback": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz", + 
"integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/consola": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/consola/-/consola-3.4.2.tgz", + "integrity": "sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==", + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.10.0" + } + }, + "node_modules/content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "license": "MIT" + }, + "node_modules/cookie": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "license": "MIT" + }, + "node_modules/copy-text-to-clipboard": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/copy-text-to-clipboard/-/copy-text-to-clipboard-3.2.0.tgz", + "integrity": "sha512-RnJFp1XR/LOBDckxTib5Qjr/PMfkatD0MUCQgdpqS8MdKiNUzBjAQBEN6oUy+jW7LI93BBG3DtMB2KOOKpGs2Q==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz", + "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==", + "license": "MIT", + "dependencies": { + "fast-glob": "^3.2.11", + "glob-parent": "^6.0.1", + "globby": "^13.1.1", + "normalize-path": "^3.0.0", + "schema-utils": "^4.0.0", + "serialize-javascript": "^6.0.0" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + } + }, + "node_modules/copy-webpack-plugin/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + 
"node_modules/copy-webpack-plugin/node_modules/globby": { + "version": "13.2.2", + "resolved": "https://registry.npmjs.org/globby/-/globby-13.2.2.tgz", + "integrity": "sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==", + "license": "MIT", + "dependencies": { + "dir-glob": "^3.0.1", + "fast-glob": "^3.3.0", + "ignore": "^5.2.4", + "merge2": "^1.4.1", + "slash": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/copy-webpack-plugin/node_modules/slash": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz", + "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/core-js": { + "version": "3.43.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.43.0.tgz", + "integrity": "sha512-N6wEbTTZSYOY2rYAn85CuvWWkCK6QweMn7/4Nr3w+gDBeBhk/x4EJeY6FPo4QzDoJZxVTv8U7CMvgWk6pOHHqA==", + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.43.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.43.0.tgz", + "integrity": "sha512-2GML2ZsCc5LR7hZYz4AXmjQw8zuy2T//2QntwdnpuYI7jteT6GVYJL7F6C2C57R7gSYrcqVW3lAALefdbhBLDA==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.25.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-pure": { + "version": "3.43.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.43.0.tgz", + "integrity": "sha512-i/AgxU2+A+BbJdMxh3v7/vxi2SbFqxiFmg6VsDwYB4jkucrd1BZNA9a9gphC0fYMG5IBSgQcbQnk865VCLe7xA==", + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "license": "MIT" + }, + "node_modules/cose-base": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", + "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "license": "MIT", + "dependencies": { + "layout-base": "^1.0.0" + } + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "license": "MIT", + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": 
"sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/crypto-random-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-4.0.0.tgz", + "integrity": "sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==", + "license": "MIT", + "dependencies": { + "type-fest": "^1.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/crypto-random-string/node_modules/type-fest": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/css-blank-pseudo": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-7.0.1.tgz", + "integrity": "sha512-jf+twWGDf6LDoXDUode+nc7ZlrqfaNphrBIBrcmeP3D8yw1uPaix1gCC8LUQUGQ6CycuK2opkbFFWFuq/a94ag==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-blank-pseudo/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/css-declaration-sorter": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-7.2.0.tgz", + "integrity": "sha512-h70rUM+3PNFuaBDTLe8wF/cdWu+dOZmb7pJt8Z2sedYbAcQVQV/tEchueg3GWxwqS0cxtbxmaHEdkNACqcvsow==", + "license": "ISC", + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "postcss": "^8.0.9" + } + }, + "node_modules/css-has-pseudo": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/css-has-pseudo/-/css-has-pseudo-7.0.2.tgz", + "integrity": "sha512-nzol/h+E0bId46Kn2dQH5VElaknX2Sr0hFuB/1EomdC7j+OISt2ZzK7EHX9DZDY53WbIVAR7FYKSO2XnSf07MQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-has-pseudo/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": 
"sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/css-has-pseudo/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/css-loader": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.11.0.tgz", + "integrity": "sha512-CTJ+AEQJjq5NzLga5pE39qdiSV56F8ywCIsqNIRF0r7BDgWsN25aazToqAFg7ZrtA/U016xudB3ffgweORxX7g==", + "license": "MIT", + "dependencies": { + "icss-utils": "^5.1.0", + "postcss": "^8.4.33", + "postcss-modules-extract-imports": "^3.1.0", + "postcss-modules-local-by-default": "^4.0.5", + "postcss-modules-scope": "^3.2.0", + "postcss-modules-values": "^4.0.0", + "postcss-value-parser": "^4.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/css-minimizer-webpack-plugin": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-5.0.1.tgz", + "integrity": "sha512-3caImjKFQkS+ws1TGcFn0V1HyDJFq1Euy589JlD6/3rV2kj+w7r5G9WDMgSHvpvXHNZ2calVypZWuEDQd9wfLg==", + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "cssnano": "^6.0.1", + "jest-worker": "^29.4.3", + "postcss": "^8.4.24", + "schema-utils": "^4.0.1", + "serialize-javascript": "^6.0.1" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + }, + "peerDependenciesMeta": { + "@parcel/css": { + "optional": true + }, + "@swc/css": { + "optional": true + }, + "clean-css": { + "optional": true + }, + "csso": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "lightningcss": { + "optional": true + } + } + }, + "node_modules/css-prefers-color-scheme": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/css-prefers-color-scheme/-/css-prefers-color-scheme-10.0.0.tgz", + "integrity": "sha512-VCtXZAWivRglTZditUfB4StnsWr6YVZ2PRtuxQLKTNRdtAf8tpzaVPE9zXIF3VaSc7O70iK/j1+NXxyQCqdPjQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/css-select": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.2.2.tgz", + "integrity": 
"sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.1.0", + "domhandler": "^5.0.2", + "domutils": "^3.0.1", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "node_modules/css-what": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.2.2.tgz", + "integrity": "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">= 6" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/cssdb": { + "version": "8.3.1", + "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-8.3.1.tgz", + "integrity": "sha512-XnDRQMXucLueX92yDe0LPKupXetWoFOgawr4O4X41l5TltgK2NVbJJVDnnOywDYfW1sTJ28AcXGKOqdRKwCcmQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + } + ], + "license": "MIT-0" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssnano": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-6.1.2.tgz", + "integrity": "sha512-rYk5UeX7VAM/u0lNqewCdasdtPK81CgX8wJFLEIXHbV2oldWRgJAsZrdhRXkV1NJzA2g850KiFm9mMU2HxNxMA==", + "license": "MIT", + "dependencies": { + "cssnano-preset-default": "^6.1.2", + "lilconfig": "^3.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/cssnano" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-advanced": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-6.1.2.tgz", + "integrity": "sha512-Nhao7eD8ph2DoHolEzQs5CfRpiEP0xa1HBdnFZ82kvqdmbwVBUr2r1QuQ4t1pi+D1ZpqpcO4T+wy/7RxzJ/WPQ==", + "license": "MIT", + "dependencies": { + "autoprefixer": "^10.4.19", + "browserslist": "^4.23.0", + "cssnano-preset-default": "^6.1.2", + "postcss-discard-unused": "^6.0.5", + "postcss-merge-idents": "^6.0.3", + "postcss-reduce-idents": "^6.0.3", + "postcss-zindex": "^6.0.2" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-preset-default": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-6.1.2.tgz", + "integrity": "sha512-1C0C+eNaeN8OcHQa193aRgYexyJtU8XwbdieEjClw+J9d94E41LwT6ivKH0WT+fYwYWB0Zp3I3IZ7tI/BbUbrg==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "css-declaration-sorter": "^7.2.0", + "cssnano-utils": "^4.0.2", + "postcss-calc": 
"^9.0.1", + "postcss-colormin": "^6.1.0", + "postcss-convert-values": "^6.1.0", + "postcss-discard-comments": "^6.0.2", + "postcss-discard-duplicates": "^6.0.3", + "postcss-discard-empty": "^6.0.3", + "postcss-discard-overridden": "^6.0.2", + "postcss-merge-longhand": "^6.0.5", + "postcss-merge-rules": "^6.1.1", + "postcss-minify-font-values": "^6.1.0", + "postcss-minify-gradients": "^6.0.3", + "postcss-minify-params": "^6.1.0", + "postcss-minify-selectors": "^6.0.4", + "postcss-normalize-charset": "^6.0.2", + "postcss-normalize-display-values": "^6.0.2", + "postcss-normalize-positions": "^6.0.2", + "postcss-normalize-repeat-style": "^6.0.2", + "postcss-normalize-string": "^6.0.2", + "postcss-normalize-timing-functions": "^6.0.2", + "postcss-normalize-unicode": "^6.1.0", + "postcss-normalize-url": "^6.0.2", + "postcss-normalize-whitespace": "^6.0.2", + "postcss-ordered-values": "^6.0.2", + "postcss-reduce-initial": "^6.1.0", + "postcss-reduce-transforms": "^6.0.2", + "postcss-svgo": "^6.0.3", + "postcss-unique-selectors": "^6.0.4" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/cssnano-utils": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-4.0.2.tgz", + "integrity": "sha512-ZR1jHg+wZ8o4c3zqf1SIUSTIvm/9mU343FMR6Obe/unskbvpGhZOo1J6d/r8D1pzkRQYuwbcH3hToOuoA2G7oQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/csso": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", + "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", + "license": "MIT", + "dependencies": { + "css-tree": "~2.2.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/css-tree": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", + "integrity": "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.28", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/csso/node_modules/mdn-data": { + "version": "2.0.28", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", + "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==", + "license": "CC0-1.0" + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "license": "MIT" + }, + "node_modules/cytoscape": { + "version": "3.32.1", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.32.1.tgz", + "integrity": "sha512-dbeqFTLYEwlFg7UGtcZhCCG/2WayX72zK3Sq323CEX29CY81tYfVhw1MIdduCtpstB0cTOhJswWlM/OEB3Xp+Q==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/cytoscape-cose-bilkent": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", + "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "license": "MIT", + 
"dependencies": { + "cose-base": "^1.0.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", + "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", + "license": "MIT", + "dependencies": { + "cose-base": "^2.2.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/cose-base": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", + "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", + "license": "MIT", + "dependencies": { + "layout-base": "^2.0.0" + } + }, + "node_modules/cytoscape-fcose/node_modules/layout-base": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", + "integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==", + "license": "MIT" + }, + "node_modules/d3": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", + "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "license": "ISC", + "dependencies": { + "d3-array": "3", + "d3-axis": "3", + "d3-brush": "3", + "d3-chord": "3", + "d3-color": "3", + "d3-contour": "4", + "d3-delaunay": "6", + "d3-dispatch": "3", + "d3-drag": "3", + "d3-dsv": "3", + "d3-ease": "3", + "d3-fetch": "3", + "d3-force": "3", + "d3-format": "3", + "d3-geo": "3", + "d3-hierarchy": "3", + "d3-interpolate": "3", + "d3-path": "3", + "d3-polygon": "3", + "d3-quadtree": "3", + "d3-random": "3", + "d3-scale": "4", + "d3-scale-chromatic": "3", + "d3-selection": "3", + "d3-shape": "3", + "d3-time": "3", + "d3-time-format": "4", + "d3-timer": "3", + "d3-transition": "3", + "d3-zoom": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", + "dependencies": { + "internmap": "1 - 2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-axis": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", + "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-brush": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", + "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "3", + "d3-transition": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-chord": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", + "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "license": "ISC", + "dependencies": { + "d3-path": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-color": { + "version": "3.1.0", + 
"resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-contour": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", + "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "license": "ISC", + "dependencies": { + "d3-array": "^3.2.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "license": "ISC", + "dependencies": { + "delaunator": "5" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-selection": "3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "license": "ISC", + "dependencies": { + "commander": "7", + "iconv-lite": "0.6", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json.js", + "csv2tsv": "bin/dsv2dsv.js", + "dsv2dsv": "bin/dsv2dsv.js", + "dsv2json": "bin/dsv2json.js", + "json2csv": "bin/json2dsv.js", + "json2dsv": "bin/json2dsv.js", + "json2tsv": "bin/json2dsv.js", + "tsv2csv": "bin/dsv2dsv.js", + "tsv2json": "bin/dsv2json.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-dsv/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/d3-dsv/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-fetch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", + "integrity": 
"sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", + "license": "ISC", + "dependencies": { + "d3-dsv": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-format": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", + "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-geo": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", + "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2.5.0 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-polygon": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", + "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-random": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-sankey": { + "version": "0.12.3", + "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz", + "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "1 - 2", + "d3-shape": "^1.2.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-array": { + 
"version": "2.12.1", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz", + "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", + "license": "BSD-3-Clause", + "dependencies": { + "internmap": "^1.0.0" + } + }, + "node_modules/d3-sankey/node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==", + "license": "BSD-3-Clause" + }, + "node_modules/d3-sankey/node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-path": "1" + } + }, + "node_modules/d3-sankey/node_modules/internmap": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz", + "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==", + "license": "ISC" + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", + "dependencies": { + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-interpolate": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", + "dependencies": { + "d3-path": "^3.1.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", + "dependencies": { + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, + "engines": { + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" + } + }, + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/dagre-d3-es": { + "version": "7.0.11", + "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.11.tgz", + "integrity": "sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw==", + "license": "MIT", + "dependencies": { + "d3": "^7.9.0", + "lodash-es": "^4.17.21" + } + }, + "node_modules/dayjs": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", + "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", + "license": "MIT" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "license": "MIT", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "license": "BSD-2-Clause", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/delaunator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", + "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "license": "ISC", + "dependencies": { + "robust-predicates": "^3.0.2" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + 
"license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==", + "license": "MIT" + }, + "node_modules/detect-port": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.6.1.tgz", + "integrity": "sha512-CmnVc+Hek2egPx1PeTFVta2W78xy2K/9Rkf6cC4T59S50tVnzKj+tnx5mmx5lwvCkujZ4uRrpRSuV+IVs3f90Q==", + "license": "MIT", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "license": "MIT", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "license": "MIT", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": 
"sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "BSD-2-Clause" + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/dompurify": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz", + "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==", + "license": "(MPL-2.0 OR Apache-2.0)", + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } + }, + "node_modules/domutils": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz", + "integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==", + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "license": "MIT", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "license": "MIT", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==", + "license": "MIT" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/ee-first": { + "version": 
"1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.178", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.178.tgz", + "integrity": "sha512-wObbz/ar3Bc6e4X5vf0iO8xTN8YAjN/tgiAOJLr7yjYFtP9wAjq8Mb5h0yn6kResir+VYx2DXBj9NNobs0ETSA==", + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==", + "license": "MIT" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/encoding-sniffer": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/encoding-sniffer/-/encoding-sniffer-0.2.1.tgz", + "integrity": "sha512-5gvq20T6vfpekVtqrYQsSCFZ1wEg5+wW0/QaZMWkFr6BqD3NfKs0rLCx4rrVlSWJeZb5NBJgVLswK/w2MWU+Gw==", + "license": "MIT", + "dependencies": { + "iconv-lite": "^0.6.3", + "whatwg-encoding": "^3.1.1" + }, + "funding": { + "url": "https://github.com/fb55/encoding-sniffer?sponsor=1" + } + }, + "node_modules/encoding-sniffer/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.2", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.2.tgz", + "integrity": "sha512-6Jw4sE1maoRJo3q8MsSIn2onJFbLTOjY9hlx4DZXmOKvLRd1Ok2kXmAGXaafL2+ijsJZ1ClYbl/pmqr9+k4iUQ==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + 
"engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esast-util-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", + "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esast-util-from-js": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz", + "integrity": "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "acorn": "^8.0.0", + "esast-util-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + 
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-scope": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz", + "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-value-to-estree": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.4.0.tgz", + "integrity": "sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + }, + "funding": { + "url": "https://github.com/eta-dev/eta?sponsor=1" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eval": { + "version": "0.1.8", + "resolved": 
"https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", + "dependencies": { + "@types/node": "*", + "require-like": ">= 0.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "license": "MIT" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + 
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/exsolve": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/exsolve/-/exsolve-1.0.7.tgz", + "integrity": "sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==", + "license": "MIT" + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "license": "MIT", + "dependencies": { + "is-extendable": "^0.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.6.tgz", + "integrity": "sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "license": "MIT", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "license": "Apache-2.0", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "license": "MIT", + "dependencies": { + "xml-js": "^1.6.11" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", 
+ "license": "MIT" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", + "license": "MIT", + "dependencies": { + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "license": "MIT", + "dependencies": { + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "funding": [ + { + 
"type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "license": "MIT", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.0.tgz", + "integrity": "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fs-monkey": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==", + "license": "Unlicense" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": 
"1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==", + "license": "ISC" + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==", + "license": "ISC" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": 
"sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "license": "BSD-2-Clause" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "license": "MIT", + "dependencies": { + "ini": "2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": "^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "license": "MIT", + "dependencies": { + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": 
"^1.0.0" + }, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "license": "MIT", + "dependencies": { + "duplexer": "^0.1.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hachure-fill": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/hachure-fill/-/hachure-fill-0.5.2.tgz", + "integrity": "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==", + "license": "MIT" + }, + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==", + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": 
{ + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz", + "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^9.0.0", + "property-information": "^7.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", + "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz", + "integrity": "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + 
"mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5/node_modules/property-information": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "license": "BSD-3-Clause", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": 
"https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" + } + }, + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "license": "MIT" + }, + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "license": "MIT" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/html-entities": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.6.0.tgz", + "integrity": "sha512-kig+rMn/QOVRvr7c86gQ8lWXq+Hkv6CbAH1hLu+RG338StTpE8Z0b44SDVaqVu7HGKf27frdmUYEs9hTUX/cLQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ], + "license": "MIT" + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "license": "MIT" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "license": "MIT", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": "^14.13.1 || >=16.0.0" + } + }, + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": 
"https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-webpack-plugin": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.3.tgz", + "integrity": "sha512-QSf1yjtSAsmf7rYBV7XX86uua4W/vkhIt0xNXKbsi2foEeW7vjJQz4bhnpL3xH+l1ryl1680uNv968Z+X6jSYg==", + "license": "MIT", + "dependencies": { + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } + } + }, + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "license": "MIT", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", + "license": "BSD-2-Clause" + }, + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": 
"sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==", + "license": "MIT" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-parser-js": { + "version": "0.5.10", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.10.tgz", + "integrity": "sha512-Pysuw9XpUq5dVc/2SMHpuTY01RFl8fttgcyunjL7eEMhGM3cI4eOmiCycJDVCo/7O7ClfQD3SaI6ftDzqOXYMA==", + "license": "MIT" + }, + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/http-proxy-middleware": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz", + "integrity": "sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==", + "license": "MIT", + "dependencies": { + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "@types/express": "^4.17.13" + }, + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } + } + }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "license": "MIT", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": 
"https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "license": "ISC", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/image-size": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-2.0.2.tgz", + "integrity": "sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==", + "license": "MIT", + "bin": { + "image-size": "bin/image-size.js" + }, + "engines": { + "node": ">=16.x" + } + }, + "node_modules/immediate": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", + "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==", + "license": "MIT" + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/infima": { + "version": "0.2.0-alpha.45", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.45.tgz", + "integrity": "sha512-uyH0zfr1erU1OohLk0fT4Rrb94AOhguWNOcD9uGrSpRvNB+6gZXUoJX5J0NtvzBO10YZ9PgvA4NFgt+fYg8ojw==", + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==", + "license": "MIT" + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.0.0" + } + }, + "node_modules/ipaddr.js": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.2.0.tgz", + "integrity": "sha512-Ag3wB2o37wslZS19hZqorUnrnzSkpOVy+IiiDEiTqNubEYpYuHWIf6K4psgN2ZWKExS4xhVCrRVfb/wfW8fWJA==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "license": "MIT", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "license": "MIT", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "license": "MIT", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "license": "MIT", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==", + "license": "MIT" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + 
"resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==", + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": 
"4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/katex": { + "version": "0.16.22", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.22.tgz", + "integrity": "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/katex/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/khroma": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", + "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + }, + 
"node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/kolorist": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/kolorist/-/kolorist-1.8.0.tgz", + "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==", + "license": "MIT" + }, + "node_modules/langium": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/langium/-/langium-3.3.1.tgz", + "integrity": "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==", + "license": "MIT", + "dependencies": { + "chevrotain": "~11.0.3", + "chevrotain-allstar": "~0.3.0", + "vscode-languageserver": "~9.0.1", + "vscode-languageserver-textdocument": "~1.0.11", + "vscode-uri": "~3.0.8" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "license": "MIT", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.10.0.tgz", + "integrity": "sha512-D7dBRJo/qcGX9xlvt/6wUYzQxjh5G1RvZPgPv8vi4KRU99DVQL/oW7tnVOCCTm2HGeo3C5HvGE5Yrh6UBoZ0vA==", + "license": "MIT", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/layout-base": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", + "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==", + "license": "MIT" + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": 
"https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "license": "MIT", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "license": "MIT", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/local-pkg": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-1.1.1.tgz", + "integrity": "sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg==", + "license": "MIT", + "dependencies": { + "mlly": "^1.7.4", + "pkg-types": "^2.0.1", + "quansync": "^0.2.8" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "license": "MIT", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "license": "MIT" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "license": "MIT" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "license": "MIT" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", + "license": "MIT" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "license": "MIT", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==", + "license": "MIT" + }, + "node_modules/lunr-languages": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", + "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==", + "license": "MPL-1.1" + }, + "node_modules/mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==", + "license": "MIT" + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "license": "MIT", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/marked": { + "version": "16.1.1", + "resolved": "https://registry.npmjs.org/marked/-/marked-16.1.1.tgz", + "integrity": "sha512-ij/2lXfCRT71L6u0M29tJPhP0bM5shLL3u5BePhFwPELj2blMJ6GDtD7PfJhRLhJ/c2UwrK17ySVcDzy2YHjHQ==", + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 20" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": 
"sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.1.0.tgz", + "integrity": "sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": 
"sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": 
"sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": 
"sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "license": "MIT", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "license": "CC0-1.0" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "license": "Unlicense", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/mermaid": { + "version": "11.9.0", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-11.9.0.tgz", + "integrity": "sha512-YdPXn9slEwO0omQfQIsW6vS84weVQftIyyTGAZCwM//MGhPzL1+l6vO6bkf0wnP4tHigH1alZ5Ooy3HXI2gOag==", + "license": "MIT", + "dependencies": { + "@braintree/sanitize-url": "^7.0.4", + "@iconify/utils": "^2.1.33", + "@mermaid-js/parser": "^0.6.2", + "@types/d3": "^7.4.3", + "cytoscape": "^3.29.3", + "cytoscape-cose-bilkent": "^4.1.0", + "cytoscape-fcose": "^2.2.0", + "d3": "^7.9.0", + "d3-sankey": "^0.12.3", + "dagre-d3-es": "7.0.11", + "dayjs": "^1.11.13", + "dompurify": "^3.2.5", + "katex": "^0.16.22", + "khroma": "^2.1.0", + "lodash-es": "^4.17.21", + "marked": "^16.0.0", + "roughjs": "^4.6.6", + "stylis": "^4.3.6", + "ts-dedent": "^2.2.0", + "uuid": "^11.1.0" + } + }, + "node_modules/mermaid/node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": 
"sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark/node_modules/micromark-util-symbol": { + 
"version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", + "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-frontmatter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-frontmatter/-/micromark-extension-frontmatter-2.0.0.tgz", + "integrity": "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + "license": "MIT", + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-frontmatter/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-frontmatter/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": 
"sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-footnote/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-table/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-gfm-task-list-item/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz", + "integrity": "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + 
"integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz", + "integrity": "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "license": "MIT", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdxjs-esm/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": 
"sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-destination/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz", + "integrity": "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-space": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.1.0.tgz", + "integrity": "sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": 
"https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-space/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-character": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.2.0.tgz", + "integrity": "sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + 
"funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": 
"sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz", + "integrity": 
"sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-events-to-acorn/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-normalize-identifier/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": 
"sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-symbol": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.1.0.tgz", + "integrity": "sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": 
"sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromark/node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark/node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "license": "MIT", + "dependencies": { + "mime-db": "~1.33.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + 
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-4.0.0.tgz", + "integrity": "sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mini-css-extract-plugin": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.2.tgz", + "integrity": "sha512-GJuACcS//jtq4kCtd5ii/M0SZf7OZRH+BxdqXZHaJfb8TJiVl+NgQRPwiYt2EuqeSkNydn/7vP+bcE27C5mb9w==", + "license": "MIT", + "dependencies": { + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.0.0" + } + }, + "node_modules/minimalistic-assert": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz", + "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==", + "license": "ISC" + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mlly": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.7.4.tgz", + "integrity": "sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw==", + "license": "MIT", + "dependencies": { + "acorn": "^8.14.0", + "pathe": "^2.0.1", + "pkg-types": "^1.3.0", + "ufo": "^1.5.4" + } + }, + "node_modules/mlly/node_modules/confbox": { + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz", + "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==", + "license": "MIT" + }, + "node_modules/mlly/node_modules/pkg-types": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz", + "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==", + "license": "MIT", + "dependencies": { + "confbox": "^0.1.8", + "mlly": "^1.7.4", + "pathe": "^2.0.1" + } + }, + "node_modules/mrmime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", + "integrity": "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/multicast-dns": { + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", + "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", + "license": "MIT", + "dependencies": { + "dns-packet": "^5.2.2", + "thunky": "^1.0.2" + }, + "bin": { + "multicast-dns": "cli.js" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/negotiator": { + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "license": "MIT" + }, + "node_modules/no-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", + "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==", + "license": "MIT", + "dependencies": { + "lower-case": "^2.0.2", + "tslib": "^2.0.3" + } + }, + "node_modules/node-emoji": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-2.2.0.tgz", + "integrity": "sha512-Z3lTE9pLaJF47NyMhd4ww1yFTAP8YhYI8SleJiHzM46Fgpm5cnNzSl9XfzFNqbaz+VlJrIj3fXQ4DeN1Rjm6cw==", + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^4.6.0", + "char-regex": "^1.0.2", + "emojilib": "^2.4.0", + "skin-tone": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/node-forge": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", + "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", + "license": "(BSD-3-Clause OR GPL-2.0)", + "engines": { + "node": ">= 6.13.0" + } + }, + "node_modules/node-releases": { + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "license": "MIT", 
+ "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-url": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-8.0.2.tgz", + "integrity": "sha512-Ee/R3SyN4BuynXcnTaekmaVdbDAEiNrHqjQIA37mHU8G9pf7aaAD4ZX3XjBLo6rsdcxA/gtkcNYZLt30ACgynw==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npm-to-yarn": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/npm-to-yarn/-/npm-to-yarn-3.0.1.tgz", + "integrity": "sha512-tt6PvKu4WyzPwWUzy/hvPFqn+uwXO0K1ZHka8az3NnrhWJDmSqI8ncWq0fkL0k/lmmi5tAC11FXwXuh0rFbt1A==", + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/nebrelbug/npm-to-yarn?sponsor=1" + } + }, + "node_modules/nprogress": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz", + "integrity": "sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA==", + "license": "MIT" + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/null-loader": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/null-loader/-/null-loader-4.0.1.tgz", + "integrity": "sha512-pxqVbi4U6N26lq+LmgIbB5XATP0VdZKOG25DhHi8btMmJJefGArFyDg1yc4U3hWCJbMqSrw0qyrz1UQX+qYXqg==", + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/null-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/null-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/null-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "license": "MIT" + }, + "node_modules/null-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/obuf": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz", + "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==", + "license": "MIT" + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/on-headers": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz", + "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "license": "MIT", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/opener": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz", + "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==", + "license": "(WTFPL OR MIT)", + "bin": { + "opener": "bin/opener-bin.js" + } + }, + "node_modules/p-cancelable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-3.0.0.tgz", + "integrity": "sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==", + "license": "MIT", + "engines": { + "node": ">=12.20" + } + }, + "node_modules/p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-limit": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", + "license": "MIT", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-6.0.0.tgz", + "integrity": "sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==", + "license": "MIT", + "dependencies": { + "p-limit": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-queue": { + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", + "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.4", + "p-timeout": "^3.2.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-retry": { + "version": 
"4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "license": "MIT", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-timeout": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", + "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", + "license": "MIT", + "dependencies": { + "p-finally": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/package-json": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-8.1.1.tgz", + "integrity": "sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==", + "license": "MIT", + "dependencies": { + "got": "^12.1.0", + "registry-auth-token": "^5.0.1", + "registry-url": "^6.0.0", + "semver": "^7.3.7" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-manager-detector": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.3.0.tgz", + "integrity": "sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ==", + "license": "MIT" + }, + "node_modules/param-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz", + "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==", + "license": "MIT", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + 
"lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-numeric-range": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz", + "integrity": "sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ==", + "license": "ISC" + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5-htmlparser2-tree-adapter": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz", + "integrity": "sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==", + "license": "MIT", + "dependencies": { + "domhandler": "^5.0.3", + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5-parser-stream": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5-parser-stream/-/parse5-parser-stream-7.1.2.tgz", + "integrity": "sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow==", + "license": "MIT", + "dependencies": { + "parse5": "^7.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5/node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/pascal-case": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz", + "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==", + "license": "MIT", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/path-data-parser": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/path-data-parser/-/path-data-parser-0.1.0.tgz", + "integrity": "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==", + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-5.0.0.tgz", + "integrity": "sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + 
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", + "license": "(WTFPL OR MIT)" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/path-to-regexp": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz", + "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==", + "license": "MIT", + "dependencies": { + "isarray": "0.0.1" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-7.0.0.tgz", + "integrity": "sha512-Ie9z/WINcxxLp27BKOCHGde4ITq9UklYKDzVo1nhk5sqGEXU3FpkwP5GM2voTGJkGd9B3Otl+Q4uwSOeSUtOBA==", + "license": "MIT", + "dependencies": { + "find-up": "^6.3.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-types": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-2.2.0.tgz", + "integrity": "sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ==", + "license": "MIT", + "dependencies": { + "confbox": "^0.2.2", + "exsolve": "^1.0.7", + "pathe": "^2.0.3" + } + }, + "node_modules/points-on-curve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/points-on-curve/-/points-on-curve-0.2.0.tgz", + "integrity": 
"sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==", + "license": "MIT" + }, + "node_modules/points-on-path": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/points-on-path/-/points-on-path-0.2.1.tgz", + "integrity": "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==", + "license": "MIT", + "dependencies": { + "path-data-parser": "0.1.0", + "points-on-curve": "0.2.0" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-attribute-case-insensitive": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-7.0.1.tgz", + "integrity": "sha512-Uai+SupNSqzlschRyNx3kbCTWgY/2hcwtHEI/ej2LJWc9JJ77qKgGptd8DHwY1mXtZ7Aoh4z4yxfwMBue9eNgw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-attribute-case-insensitive/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-calc": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-9.0.1.tgz", + "integrity": "sha512-TipgjGyzP5QzEhsOZUaIkeO5mKeMFpebWzRogWG/ysonUlnHcq5aJe0jOjpfzUU8PeSaBQnrE8ehR0QA5vs8PQ==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.11", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.2.2" + } + }, + "node_modules/postcss-clamp": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-clamp/-/postcss-clamp-4.1.0.tgz", + "integrity": "sha512-ry4b1Llo/9zz+PKC+030KUnPITTJAHeOwjfAyyB60eT0AorGLdzp52s31OsPRHRf8NchkgFoG2y6fCfn1IV1Ow==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=7.6.0" + }, + "peerDependencies": { + "postcss": "^8.4.6" + } + }, + "node_modules/postcss-color-functional-notation": { + "version": "7.0.10", + "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-7.0.10.tgz", + "integrity": "sha512-k9qX+aXHBiLTRrWoCJuUFI6F1iF6QJQUXNVWJVSbqZgj57jDhBlOvD8gNUGl35tgqDivbGLhZeW3Ongz4feuKA==", + "funding": [ + { + 
"type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-color-hex-alpha": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-10.0.0.tgz", + "integrity": "sha512-1kervM2cnlgPs2a8Vt/Qbe5cQ++N7rkYo/2rz2BkqJZIHQwaVuJgQH38REHrAi4uM0b1fqxMkWYmese94iMp3w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-color-rebeccapurple": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-10.0.0.tgz", + "integrity": "sha512-JFta737jSP+hdAIEhk1Vs0q0YF5P8fFcj+09pweS8ktuGuZ8pPlykHsk6mPxZ8awDl4TrcxUqJo9l1IhVr/OjQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-colormin": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-6.1.0.tgz", + "integrity": "sha512-x9yX7DOxeMAR+BgGVnNSAxmAj98NX/YxEMNFP+SDCEeNLb2r3i6Hh1ksMsnW8Ub5SLCpbescQqn9YEbE9554Sw==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "colord": "^2.9.3", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-convert-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-6.1.0.tgz", + "integrity": "sha512-zx8IwP/ts9WvUM6NkVSkiU902QZL1bwPhaVaLynPtCsOTqp+ZKbNi+s6XJg3rfqpKGA/oc7Oxk5t8pOQJcwl/w==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-custom-media": { + "version": "11.0.6", + "resolved": "https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-11.0.6.tgz", + "integrity": "sha512-C4lD4b7mUIw+RZhtY7qUbf4eADmb7Ey8BFA2px9jUbwg7pjTZDl4KY4bvlUV+/vXQvzQRfiGEVJyAbtOsCMInw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/cascade-layer-name-parser": "^2.0.5", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + 
"@csstools/media-query-list-parser": "^4.0.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-custom-properties": { + "version": "14.0.6", + "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-14.0.6.tgz", + "integrity": "sha512-fTYSp3xuk4BUeVhxCSJdIPhDLpJfNakZKoiTDx7yRGCdlZrSJR7mWKVOBS4sBF+5poPQFMj2YdXx1VHItBGihQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/cascade-layer-name-parser": "^2.0.5", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-custom-selectors": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-8.0.5.tgz", + "integrity": "sha512-9PGmckHQswiB2usSO6XMSswO2yFWVoCAuih1yl9FVcwkscLjRKjwsjM3t+NIWpSU2Jx3eOiK2+t4vVTQaoCHHg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/cascade-layer-name-parser": "^2.0.5", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-custom-selectors/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-dir-pseudo-class": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-9.0.1.tgz", + "integrity": "sha512-tRBEK0MHYvcMUrAuYMEOa0zg9APqirBcgzi6P21OhxtJyJADo/SWBwY1CAwEohQ/6HDaa9jCjLRG7K3PVQYHEA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-dir-pseudo-class/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-discard-comments": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-6.0.2.tgz", + "integrity": 
"sha512-65w/uIqhSBBfQmYnG92FO1mWZjJ4GL5b8atm5Yw2UgrwD7HiNiSSNwJor1eCFGzUgYnN/iIknhNRVqjrrpuglw==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-duplicates": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-6.0.3.tgz", + "integrity": "sha512-+JA0DCvc5XvFAxwx6f/e68gQu/7Z9ud584VLmcgto28eB8FqSFZwtrLwB5Kcp70eIoWP/HXqz4wpo8rD8gpsTw==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-empty": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-6.0.3.tgz", + "integrity": "sha512-znyno9cHKQsK6PtxL5D19Fj9uwSzC2mB74cpT66fhgOadEUPyXFkbgwm5tvc3bt3NAy8ltE5MrghxovZRVnOjQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-overridden": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-6.0.2.tgz", + "integrity": "sha512-j87xzI4LUggC5zND7KdjsI25APtyMuynXZSujByMaav2roV6OZX+8AaCUcZSWqckZpjAjRyFDdpqybgjFO0HJQ==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-discard-unused": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-6.0.5.tgz", + "integrity": "sha512-wHalBlRHkaNnNwfC8z+ppX57VhvS+HWgjW508esjdaEYr3Mx7Gnn2xA4R/CKf5+Z9S5qsqC+Uzh4ueENWwCVUA==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-double-position-gradients": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-6.0.2.tgz", + "integrity": "sha512-7qTqnL7nfLRyJK/AHSVrrXOuvDDzettC+wGoienURV8v2svNbu6zJC52ruZtHaO6mfcagFmuTGFdzRsJKB3k5Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-visible": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/postcss-focus-visible/-/postcss-focus-visible-10.0.1.tgz", + "integrity": "sha512-U58wyjS/I1GZgjRok33aE8juW9qQgQUNwTSdxQGuShHzwuYdcklnvK/+qOWX1Q9kr7ysbraQ6ht6r+udansalA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-visible/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": 
"https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-focus-within": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/postcss-focus-within/-/postcss-focus-within-9.0.1.tgz", + "integrity": "sha512-fzNUyS1yOYa7mOjpci/bR+u+ESvdar6hk8XNK/TRR0fiGTp2QT5N+ducP0n3rfH/m9I7H/EQU6lsa2BrgxkEjw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-focus-within/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-font-variant": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/postcss-font-variant/-/postcss-font-variant-5.0.0.tgz", + "integrity": "sha512-1fmkBaCALD72CK2a9i468mA/+tr9/1cBxRRMXOUaZqO43oWPR5imcyPjXwuv7PXbCid4ndlP5zWhidQVVa3hmA==", + "license": "MIT", + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-gap-properties": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/postcss-gap-properties/-/postcss-gap-properties-6.0.0.tgz", + "integrity": "sha512-Om0WPjEwiM9Ru+VhfEDPZJAKWUd0mV1HmNXqp2C29z80aQ2uP9UVhLc7e3aYMIor/S5cVhoPgYQ7RtfeZpYTRw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-image-set-function": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/postcss-image-set-function/-/postcss-image-set-function-7.0.0.tgz", + "integrity": "sha512-QL7W7QNlZuzOwBTeXEmbVckNt1FSmhQtbMRvGGqqU4Nf4xk6KUEQhAoWuMzwbSv5jxiRiSZ5Tv7eiDB9U87znA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/utilities": "^2.0.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-lab-function": { + "version": "7.0.10", + "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-7.0.10.tgz", + "integrity": "sha512-tqs6TCEv9tC1Riq6fOzHuHcZyhg4k3gIAMB8GGY/zA1ssGdm6puHMVE7t75aOSoFg7UD2wyrFFhbldiCMyyFTQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + 
"@csstools/css-color-parser": "^3.0.10", + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/utilities": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-loader": { + "version": "7.3.4", + "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.4.tgz", + "integrity": "sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A==", + "license": "MIT", + "dependencies": { + "cosmiconfig": "^8.3.5", + "jiti": "^1.20.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">= 14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "postcss": "^7.0.0 || ^8.0.1", + "webpack": "^5.0.0" + } + }, + "node_modules/postcss-logical": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-8.1.0.tgz", + "integrity": "sha512-pL1hXFQ2fEXNKiNiAgtfA005T9FBxky5zkX6s4GZM2D8RkVgRqz3f4g1JUoq925zXv495qk8UNldDwh8uGEDoA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-merge-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-6.0.3.tgz", + "integrity": "sha512-1oIoAsODUs6IHQZkLQGO15uGEbK3EAl5wi9SS8hs45VgsxQfMnxvt+L+zIr7ifZFIH14cfAeVe2uCTa+SPRa3g==", + "license": "MIT", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-longhand": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-6.0.5.tgz", + "integrity": "sha512-5LOiordeTfi64QhICp07nzzuTDjNSO8g5Ksdibt44d+uvIIAE1oZdRn8y/W5ZtYgRH/lnLDlvi9F8btZcVzu3w==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "stylehacks": "^6.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-merge-rules": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-6.1.1.tgz", + "integrity": "sha512-KOdWF0gju31AQPZiD+2Ar9Qjowz1LTChSjFFbS+e2sFgc4uHOp3ZvVX4sNeTlk0w2O31ecFGgrFzhO0RSWbWwQ==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0", + "cssnano-utils": "^4.0.2", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-font-values": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-6.1.0.tgz", + "integrity": "sha512-gklfI/n+9rTh8nYaSJXlCo3nOKqMNkxuGpTn/Qm0gstL3ywTr9/WRKznE+oy6fvfolH6dF+QM4nCo8yPLdvGJg==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + 
"node_modules/postcss-minify-gradients": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-6.0.3.tgz", + "integrity": "sha512-4KXAHrYlzF0Rr7uc4VrfwDJ2ajrtNEpNEuLxFgwkhFZ56/7gaE4Nr49nLsQDZyUe+ds+kEhf+YAUolJiYXF8+Q==", + "license": "MIT", + "dependencies": { + "colord": "^2.9.3", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-params": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-6.1.0.tgz", + "integrity": "sha512-bmSKnDtyyE8ujHQK0RQJDIKhQ20Jq1LYiez54WiaOoBtcSuflfK3Nm596LvbtlFcpipMjgClQGyGr7GAs+H1uA==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-minify-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-6.0.4.tgz", + "integrity": "sha512-L8dZSwNLgK7pjTto9PzWRoMbnLq5vsZSTu8+j1P/2GB8qdtGQfn+K1uSvFgYvgh83cbyxT5m43ZZhUMTJDSClQ==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-modules-extract-imports": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.1.0.tgz", + "integrity": "sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==", + "license": "ISC", + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.2.0.tgz", + "integrity": "sha512-5kcJm/zk+GJDSfw+V/42fJ5fhjL5YbFDl8nVdXkJPLLW+Vf9mTD5Xe0wqIaDnLuL2U6cDNpTr+UQ+v2HWIBhzw==", + "license": "MIT", + "dependencies": { + "icss-utils": "^5.0.0", + "postcss-selector-parser": "^7.0.0", + "postcss-value-parser": "^4.1.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-local-by-default/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-modules-scope": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.2.1.tgz", + "integrity": "sha512-m9jZstCVaqGjTAuny8MdgE88scJnCiQSlSrOWcTQgM2t32UBe+MUmFSO5t7VMSfAf/FJKImAxBav8ooCHJXCJA==", + "license": "ISC", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-modules-scope/node_modules/postcss-selector-parser": { + "version": 
"7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-modules-values": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz", + "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==", + "license": "ISC", + "dependencies": { + "icss-utils": "^5.0.0" + }, + "engines": { + "node": "^10 || ^12 || >= 14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/postcss-nesting": { + "version": "13.0.2", + "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-13.0.2.tgz", + "integrity": "sha512-1YCI290TX+VP0U/K/aFxzHzQWHWURL+CtHMSbex1lCdpXD1SoR2sYuxDu5aNI9lPoXpKTCggFZiDJbwylU0LEQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/selector-resolve-nested": "^3.1.0", + "@csstools/selector-specificity": "^5.0.0", + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-nesting/node_modules/@csstools/selector-resolve-nested": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-resolve-nested/-/selector-resolve-nested-3.1.0.tgz", + "integrity": "sha512-mf1LEW0tJLKfWyvn5KdDrhpxHyuxpbNwTIwOYLIvsTffeyOf85j5oIzfG0yosxDgx/sswlqBnESYUcQH0vgZ0g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/postcss-nesting/node_modules/@csstools/selector-specificity": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-5.0.0.tgz", + "integrity": "sha512-PCqQV3c4CoVm3kdPhyeZ07VmBRdH2EpMFA/pd9OASpOEC3aXNGoqPDAZ80D0cLpMBxnmk0+yNhGsEx31hq7Gtw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss-selector-parser": "^7.0.0" + } + }, + "node_modules/postcss-nesting/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-normalize-charset": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-6.0.2.tgz", + "integrity": "sha512-a8N9czmdnrjPHa3DeFlwqst5eaL5W8jYu3EBbTTkI5FHkfMhFZh1EGbku6jhHhIzTA6tquI2P42NtZ59M/H/kQ==", + 
"license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-display-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-6.0.2.tgz", + "integrity": "sha512-8H04Mxsb82ON/aAkPeq8kcBbAtI5Q2a64X/mnRRfPXBq7XeogoQvReqxEfc0B4WPq1KimjezNC8flUtC3Qz6jg==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-positions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-6.0.2.tgz", + "integrity": "sha512-/JFzI441OAB9O7VnLA+RtSNZvQ0NCFZDOtp6QPFo1iIyawyXg0YI3CYM9HBy1WvwCRHnPep/BvI1+dGPKoXx/Q==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-repeat-style": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-6.0.2.tgz", + "integrity": "sha512-YdCgsfHkJ2jEXwR4RR3Tm/iOxSfdRt7jplS6XRh9Js9PyCR/aka/FCb6TuHT2U8gQubbm/mPmF6L7FY9d79VwQ==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-string": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-6.0.2.tgz", + "integrity": "sha512-vQZIivlxlfqqMp4L9PZsFE4YUkWniziKjQWUtsxUiVsSSPelQydwS8Wwcuw0+83ZjPWNTl02oxlIvXsmmG+CiQ==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-timing-functions": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-6.0.2.tgz", + "integrity": "sha512-a+YrtMox4TBtId/AEwbA03VcJgtyW4dGBizPl7e88cTFULYsprgHWTbfyjSLyHeBcK/Q9JhXkt2ZXiwaVHoMzA==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-unicode": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-6.1.0.tgz", + "integrity": "sha512-QVC5TQHsVj33otj8/JD869Ndr5Xcc/+fwRh4HAsFsAeygQQXm+0PySrKbr/8tkDKzW+EVT3QkqZMfFrGiossDg==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-normalize-url": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-6.0.2.tgz", + "integrity": "sha512-kVNcWhCeKAzZ8B4pv/DnrU1wNh458zBNp8dh4y5hhxih5RZQ12QWMuQrDgPRw3LRl8mN9vOVfHl7uhvHYMoXsQ==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" 
+ } + }, + "node_modules/postcss-normalize-whitespace": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-6.0.2.tgz", + "integrity": "sha512-sXZ2Nj1icbJOKmdjXVT9pnyHQKiSAyuNQHSgRCUgThn2388Y9cGVDR+E9J9iAYbSbLHI+UUwLVl1Wzco/zgv0Q==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-opacity-percentage": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/postcss-opacity-percentage/-/postcss-opacity-percentage-3.0.0.tgz", + "integrity": "sha512-K6HGVzyxUxd/VgZdX04DCtdwWJ4NGLG212US4/LA1TLAbHgmAsTWVR86o+gGIbFtnTkfOpb9sCRBx8K7HO66qQ==", + "funding": [ + { + "type": "kofi", + "url": "https://ko-fi.com/mrcgrtz" + }, + { + "type": "liberapay", + "url": "https://liberapay.com/mrcgrtz" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-ordered-values": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-6.0.2.tgz", + "integrity": "sha512-VRZSOB+JU32RsEAQrO94QPkClGPKJEL/Z9PCBImXMhIeK5KAYo6slP/hBYlLgrCjFxyqvn5VC81tycFEDBLG1Q==", + "license": "MIT", + "dependencies": { + "cssnano-utils": "^4.0.2", + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-overflow-shorthand": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/postcss-overflow-shorthand/-/postcss-overflow-shorthand-6.0.0.tgz", + "integrity": "sha512-BdDl/AbVkDjoTofzDQnwDdm/Ym6oS9KgmO7Gr+LHYjNWJ6ExORe4+3pcLQsLA9gIROMkiGVjjwZNoL/mpXHd5Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-page-break": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/postcss-page-break/-/postcss-page-break-3.0.4.tgz", + "integrity": "sha512-1JGu8oCjVXLa9q9rFTo4MbeeA5FMe00/9C7lN4va606Rdb+HkxXtXsmEDrIraQ11fGz/WvKWa8gMuCKkrXpTsQ==", + "license": "MIT", + "peerDependencies": { + "postcss": "^8" + } + }, + "node_modules/postcss-place": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/postcss-place/-/postcss-place-10.0.0.tgz", + "integrity": "sha512-5EBrMzat2pPAxQNWYavwAfoKfYcTADJ8AXGVPcUZ2UkNloUTWzJQExgrzrDkh3EKzmAx1evfTAzF9I8NGcc+qw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-preset-env": { + "version": "10.2.4", + "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-10.2.4.tgz", + "integrity": "sha512-q+lXgqmTMdB0Ty+EQ31SuodhdfZetUlwCA/F0zRcd/XdxjzI+Rl2JhZNz5US2n/7t9ePsvuhCnEN4Bmu86zXlA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": 
"https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "@csstools/postcss-cascade-layers": "^5.0.2", + "@csstools/postcss-color-function": "^4.0.10", + "@csstools/postcss-color-mix-function": "^3.0.10", + "@csstools/postcss-color-mix-variadic-function-arguments": "^1.0.0", + "@csstools/postcss-content-alt-text": "^2.0.6", + "@csstools/postcss-exponential-functions": "^2.0.9", + "@csstools/postcss-font-format-keywords": "^4.0.0", + "@csstools/postcss-gamut-mapping": "^2.0.10", + "@csstools/postcss-gradients-interpolation-method": "^5.0.10", + "@csstools/postcss-hwb-function": "^4.0.10", + "@csstools/postcss-ic-unit": "^4.0.2", + "@csstools/postcss-initial": "^2.0.1", + "@csstools/postcss-is-pseudo-class": "^5.0.3", + "@csstools/postcss-light-dark-function": "^2.0.9", + "@csstools/postcss-logical-float-and-clear": "^3.0.0", + "@csstools/postcss-logical-overflow": "^2.0.0", + "@csstools/postcss-logical-overscroll-behavior": "^2.0.0", + "@csstools/postcss-logical-resize": "^3.0.0", + "@csstools/postcss-logical-viewport-units": "^3.0.4", + "@csstools/postcss-media-minmax": "^2.0.9", + "@csstools/postcss-media-queries-aspect-ratio-number-values": "^3.0.5", + "@csstools/postcss-nested-calc": "^4.0.0", + "@csstools/postcss-normalize-display-values": "^4.0.0", + "@csstools/postcss-oklab-function": "^4.0.10", + "@csstools/postcss-progressive-custom-properties": "^4.1.0", + "@csstools/postcss-random-function": "^2.0.1", + "@csstools/postcss-relative-color-syntax": "^3.0.10", + "@csstools/postcss-scope-pseudo-class": "^4.0.1", + "@csstools/postcss-sign-functions": "^1.1.4", + "@csstools/postcss-stepped-value-functions": "^4.0.9", + "@csstools/postcss-text-decoration-shorthand": "^4.0.2", + "@csstools/postcss-trigonometric-functions": "^4.0.9", + "@csstools/postcss-unset-value": "^4.0.0", + "autoprefixer": "^10.4.21", + "browserslist": "^4.25.0", + "css-blank-pseudo": "^7.0.1", + "css-has-pseudo": "^7.0.2", + "css-prefers-color-scheme": "^10.0.0", + "cssdb": "^8.3.0", + "postcss-attribute-case-insensitive": "^7.0.1", + "postcss-clamp": "^4.1.0", + "postcss-color-functional-notation": "^7.0.10", + "postcss-color-hex-alpha": "^10.0.0", + "postcss-color-rebeccapurple": "^10.0.0", + "postcss-custom-media": "^11.0.6", + "postcss-custom-properties": "^14.0.6", + "postcss-custom-selectors": "^8.0.5", + "postcss-dir-pseudo-class": "^9.0.1", + "postcss-double-position-gradients": "^6.0.2", + "postcss-focus-visible": "^10.0.1", + "postcss-focus-within": "^9.0.1", + "postcss-font-variant": "^5.0.0", + "postcss-gap-properties": "^6.0.0", + "postcss-image-set-function": "^7.0.0", + "postcss-lab-function": "^7.0.10", + "postcss-logical": "^8.1.0", + "postcss-nesting": "^13.0.2", + "postcss-opacity-percentage": "^3.0.0", + "postcss-overflow-shorthand": "^6.0.0", + "postcss-page-break": "^3.0.4", + "postcss-place": "^10.0.0", + "postcss-pseudo-class-any-link": "^10.0.1", + "postcss-replace-overflow-wrap": "^4.0.0", + "postcss-selector-not": "^8.0.1" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-pseudo-class-any-link": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-10.0.1.tgz", + "integrity": "sha512-3el9rXlBOqTFaMFkWDOkHUTQekFIYnaQY55Rsp8As8QQkpiSgIYEcF/6Ond93oHiDsGb4kad8zjt+NPlOC1H0Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": 
"https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-pseudo-class-any-link/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-reduce-idents": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-6.0.3.tgz", + "integrity": "sha512-G3yCqZDpsNPoQgbDUy3T0E6hqOQ5xigUtBQyrmq3tn2GxlyiL0yyl7H+T8ulQR6kOcHJ9t7/9H4/R2tv8tJbMA==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-initial": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-6.1.0.tgz", + "integrity": "sha512-RarLgBK/CrL1qZags04oKbVbrrVK2wcxhvta3GCxrZO4zveibqbRPmm2VI8sSgCXwoUHEliRSbOfpR0b/VIoiw==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "caniuse-api": "^3.0.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-reduce-transforms": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-6.0.2.tgz", + "integrity": "sha512-sB+Ya++3Xj1WaT9+5LOOdirAxP7dJZms3GRcYheSPi1PiTMigsxHAdkrbItHxwYHr4kt1zL7mmcHstgMYT+aiA==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-replace-overflow-wrap": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-4.0.0.tgz", + "integrity": "sha512-KmF7SBPphT4gPPcKZc7aDkweHiKEEO8cla/GjcBK+ckKxiZslIu3C4GCRW3DNfL0o7yW7kMQu9xlZ1kXRXLXtw==", + "license": "MIT", + "peerDependencies": { + "postcss": "^8.0.3" + } + }, + "node_modules/postcss-selector-not": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-8.0.1.tgz", + "integrity": "sha512-kmVy/5PYVb2UOhy0+LqUYAhKj7DUGDpSWa5LZqlkWJaaAV+dxxsOG3+St0yNLu6vsKD7Dmqx+nWQt0iil89+WA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/postcss-selector-not/node_modules/postcss-selector-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", + "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { 
+ "node": ">=4" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-sort-media-queries": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-5.2.0.tgz", + "integrity": "sha512-AZ5fDMLD8SldlAYlvi8NIqo0+Z8xnXU2ia0jxmuhxAU+Lqt9K+AlmLNJ/zWEnE9x+Zx3qL3+1K20ATgNOr3fAA==", + "license": "MIT", + "dependencies": { + "sort-css-media-queries": "2.2.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.4.23" + } + }, + "node_modules/postcss-svgo": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-6.0.3.tgz", + "integrity": "sha512-dlrahRmxP22bX6iKEjOM+c8/1p+81asjKT+V5lrgOH944ryx/OHpclnIbGsKVd3uWOXFLYJwCVf0eEkJGvO96g==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.2.0", + "svgo": "^3.2.0" + }, + "engines": { + "node": "^14 || ^16 || >= 18" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-unique-selectors": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-6.0.4.tgz", + "integrity": "sha512-K38OCaIrO8+PzpArzkLKB42dSARtC2tmG6PvD4b1o1Q2E9Os8jzfWFfSy/rixsHwohtsDdFtAWGjFVFUdwYaMg==", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "license": "MIT" + }, + "node_modules/postcss-zindex": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-6.0.2.tgz", + "integrity": "sha512-5BxW9l1evPB/4ZIc+2GobEBoKC+h8gPGCMi+jxsYvd2x0mjq7wazk6DrP71pStqxE9Foxh5TVnonbWpFZzXaYg==", + "license": "MIT", + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/pretty-error": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", + "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==", + "license": "MIT", + "dependencies": { + "lodash": "^4.17.20", + "renderkid": "^3.0.0" + } + }, + "node_modules/pretty-time": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz", + "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/prism-react-renderer": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.4.1.tgz", + "integrity": "sha512-ey8Ls/+Di31eqzUxC46h8MksNuGx/n0AAC8uKpwFau4RPDYLuE3EXTp8N8G2vX2N7UC/+IXeNUnlWBGGcAG+Ig==", + "license": "MIT", + "dependencies": { + "@types/prismjs": "^1.26.0", + "clsx": 
"^2.0.0" + }, + "peerDependencies": { + "react": ">=16.0.0" + } + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "license": "MIT" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", + "license": "ISC" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/proxy-addr/node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pupa": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", + "integrity": "sha512-FLpr4flz5xZTSJxSeaheeMKN/EDzMdK7b8PTOC6a5PYFKTucWbdqjgqaEyH0shFiSJrVB1+Qqi4Tk19ccU6Aug==", + "license": "MIT", + "dependencies": { + "escape-goat": "^4.0.0" + }, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/qs": { + "version": "6.13.0", + "resolved": 
"https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/quansync": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/quansync/-/quansync-0.2.10.tgz", + "integrity": "sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/antfu" + }, + { + "type": "individual", + "url": "https://github.com/sponsors/sxzz" + } + ], + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/ini": { + "version": 
"1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "license": "ISC" + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", + "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", + "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.26.0" + }, + "peerDependencies": { + "react": "^19.1.1" + } + }, + "node_modules/react-fast-compare": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz", + "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==", + "license": "MIT" + }, + "node_modules/react-helmet-async": { + "name": "@slorber/react-helmet-async", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@slorber/react-helmet-async/-/react-helmet-async-1.3.0.tgz", + "integrity": "sha512-e9/OK8VhwUSc67diWI8Rb3I0YgI9/SBQtnhe9aEuK6MhZm7ntZZimXgwXnd8W96YTmSOb9M4d8LwhRZyhWr/1A==", + "license": "Apache-2.0", + "dependencies": { + "@babel/runtime": "^7.12.5", + "invariant": "^2.2.4", + "prop-types": "^15.7.2", + "react-fast-compare": "^3.2.0", + "shallowequal": "^1.1.0" + }, + "peerDependencies": { + "react": "^16.6.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/react-json-view-lite": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-2.4.1.tgz", + "integrity": "sha512-fwFYknRIBxjbFm0kBDrzgBy1xa5tDg2LyXXBepC5f1b+MY3BUClMCsvanMPn089JbV1Eg3nZcrp0VCuH43aXnA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/react-loadable": { + "name": "@docusaurus/react-loadable", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", + "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", + "license": "MIT", + "dependencies": { + "@types/react": "*" + }, + "peerDependencies": { + "react": "*" + } + }, + "node_modules/react-loadable-ssr-addon-v5-slorber": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz", + "integrity": 
"sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.3" + }, + "engines": { + "node": ">=10.13.0" + }, + "peerDependencies": { + "react-loadable": "*", + "webpack": ">=4.41.1 || 5.x" + } + }, + "node_modules/react-router": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", + "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "hoist-non-react-statics": "^3.1.0", + "loose-envify": "^1.3.1", + "path-to-regexp": "^1.7.0", + "prop-types": "^15.6.2", + "react-is": "^16.6.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/react-router-config": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz", + "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.1.2" + }, + "peerDependencies": { + "react": ">=15", + "react-router": ">=5" + } + }, + "node_modules/react-router-dom": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", + "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.13", + "history": "^4.9.0", + "loose-envify": "^1.3.1", + "prop-types": "^15.6.2", + "react-router": "5.3.4", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0" + }, + "peerDependencies": { + "react": ">=15" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/recma-build-jsx": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz", + "integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-jsx": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.0.tgz", + "integrity": "sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q==", + "license": "MIT", + "dependencies": { + "acorn-jsx": "^5.0.0", + "estree-util-to-js": "^2.0.0", + "recma-parse": "^1.0.0", + "recma-stringify": 
"^1.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-parse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz", + "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "esast-util-from-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-stringify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz", + "integrity": "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-to-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "license": "MIT" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz", + "integrity": "sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==", + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regexpu-core": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-6.2.0.tgz", + "integrity": "sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA==", + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.2.0", + "regjsgen": "^0.8.0", + "regjsparser": "^0.12.0", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/registry-auth-token": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-5.1.0.tgz", + "integrity": "sha512-GdekYuwLXLxMuFTwAPg5UKGLW/UXzQrZvH/Zj791BQif5T05T0RsaLfHc9q3ZOKi7n+BoprPD9mJ0O0k4xzUlw==", + "license": "MIT", + "dependencies": { + "@pnpm/npm-conf": "^2.1.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/registry-url": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-6.0.1.tgz", + "integrity": "sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==", + "license": "MIT", + "dependencies": { + "rc": "1.2.8" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==", + "license": "MIT" + }, + "node_modules/regjsparser": { + "version": "0.12.0", + "resolved": 
"https://registry.npmjs.org/regjsparser/-/regjsparser-0.12.0.tgz", + "integrity": "sha512-cnE+y8bz4NhMjISKbgeVJtqNbtf5QpjZP+Bslo+UqkIt9QPnX9q095eiRRASJG1/tz6dlNr6Z5NsBiWYokp6EQ==", + "license": "BSD-2-Clause", + "dependencies": { + "jsesc": "~3.0.2" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz", + "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-recma": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz", + "integrity": "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "hast-util-to-estree": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/relateurl": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz", + "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/remark-directive": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.1.tgz", + "integrity": "sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-emoji": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-4.0.1.tgz", + "integrity": "sha512-fHdvsTR1dHkWKev9eNyhTo4EFwbUvJ8ka9SgeWkMPYFX4WoI7ViVBms3PjlQYgw5TLvNQso3GUB/b/8t3yo+dg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.2", + "emoticon": "^4.0.1", + "mdast-util-find-and-replace": "^3.0.1", + "node-emoji": "^2.1.0", + "unified": "^11.0.4" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/remark-frontmatter": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-5.0.0.tgz", + "integrity": "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": 
"4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.0.tgz", + "integrity": "sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA==", + "license": "MIT", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "license": "MIT", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/renderkid": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", + "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==", + "license": "MIT", + "dependencies": { + "css-select": "^4.1.3", + "dom-converter": "^0.2.0", + "htmlparser2": "^6.1.0", + "lodash": "^4.17.21", + "strip-ansi": "^6.0.1" + } + }, + "node_modules/renderkid/node_modules/css-select": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz", + "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==", + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0", + "css-what": "^6.0.1", + "domhandler": "^4.3.1", + "domutils": "^2.8.0", + "nth-check": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/fb55" + } + }, + "node_modules/renderkid/node_modules/dom-serializer": { + "version": "1.4.1", + 
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz", + "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==", + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.2.0", + "entities": "^2.0.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domhandler": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz", + "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==", + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.2.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/domutils": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz", + "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==", + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^1.0.1", + "domelementtype": "^2.2.0", + "domhandler": "^4.2.0" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/entities": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", + "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "license": "BSD-2-Clause", + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/renderkid/node_modules/htmlparser2": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz", + "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.0.1", + "domhandler": "^4.0.0", + "domutils": "^2.5.2", + "entities": "^2.0.0" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-like": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz", + "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==", + "engines": { + "node": "*" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "license": "MIT" + }, + "node_modules/resolve": { + "version": 
"1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pathname": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", + "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==", + "license": "MIT" + }, + "node_modules/responselike": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-3.0.0.tgz", + "integrity": "sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==", + "license": "MIT", + "dependencies": { + "lowercase-keys": "^3.0.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/robust-predicates": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", + "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==", + "license": "Unlicense" + }, + "node_modules/roughjs": { + "version": "4.6.6", + "resolved": "https://registry.npmjs.org/roughjs/-/roughjs-4.6.6.tgz", + "integrity": "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==", + "license": "MIT", + "dependencies": { + "hachure-fill": "^0.5.2", + "path-data-parser": "^0.1.0", + 
"points-on-curve": "^0.2.0", + "points-on-path": "^0.2.1" + } + }, + "node_modules/rtlcss": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", + "license": "MIT", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0", + "postcss": "^8.4.21", + "strip-json-comments": "^3.1.1" + }, + "bin": { + "rtlcss": "bin/rtlcss.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==", + "license": "BSD-3-Clause" + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/sax": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", + "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==", + "license": "ISC" + }, + "node_modules/scheduler": { + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", + "license": "MIT" + }, + "node_modules/schema-dts": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/schema-dts/-/schema-dts-1.1.5.tgz", + "integrity": "sha512-RJr9EaCmsLzBX2NDiO5Z3ux2BVosNZN5jo0gWgsyKvxKIUL5R3swNvoorulAeL9kLB0iTSX7V6aokhla2m7xbg==", + "license": "Apache-2.0" + }, + "node_modules/schema-utils": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.2.tgz", + "integrity": "sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/search-insights": { + "version": "2.17.3", + 
"resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.3.tgz", + "integrity": "sha512-RQPdCYTa8A68uM2jwxoY842xDhvx3E5LFL1LxvxCNMev4o5mLuokczhzjAgGwUZBAmOKZknArSxLKmXtIi2AxQ==", + "license": "MIT", + "peer": true + }, + "node_modules/section-matter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", + "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", + "license": "MIT", + "dependencies": { + "extend-shallow": "^2.0.1", + "kind-of": "^6.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/select-hose": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz", + "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg==", + "license": "MIT" + }, + "node_modules/selfsigned": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.4.1.tgz", + "integrity": "sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q==", + "license": "MIT", + "dependencies": { + "@types/node-forge": "^1.3.0", + "node-forge": "^1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-4.0.0.tgz", + "integrity": "sha512-0Ju4+6A8iOnpL/Thra7dZsSlOHYAHIeMxfhWQRI1/VLcT3WDBZKKtQt/QkBOsiIN9ZpuvHE6cGZ0x4glCMmfiA==", + "license": "MIT", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/send/node_modules/debug/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": 
{ + "node": ">= 0.8" + } + }, + "node_modules/send/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/serve-handler": { + "version": "6.1.6", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.6.tgz", + "integrity": "sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ==", + "license": "MIT", + "dependencies": { + "bytes": "3.0.0", + "content-disposition": "0.5.2", + "mime-types": "2.1.18", + "minimatch": "3.1.2", + "path-is-inside": "1.0.2", + "path-to-regexp": "3.3.0", + "range-parser": "1.2.0" + } + }, + "node_modules/serve-handler/node_modules/path-to-regexp": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", + "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", + "license": "MIT" + }, + "node_modules/serve-index": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz", + "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==", + "license": "MIT", + "dependencies": { + "accepts": "~1.3.4", + "batch": "0.6.1", + "debug": "2.6.9", + "escape-html": "~1.0.3", + "http-errors": "~1.6.2", + "mime-types": "~2.1.17", + "parseurl": "~1.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/serve-index/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/serve-index/node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==", + "license": "MIT", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-index/node_modules/inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==", + "license": "ISC" + }, + "node_modules/serve-index/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + 
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/serve-index/node_modules/setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", + "license": "ISC" + }, + "node_modules/serve-index/node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/serve-static": { + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.19.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "license": "MIT", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==", + "license": "MIT" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz", + "integrity": 
"sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, + "node_modules/sirv": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/sirv/-/sirv-2.0.4.tgz", + "integrity": "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ==", + "license": "MIT", + "dependencies": { + "@polka/url": "^1.0.0-next.24", + "mrmime": "^2.0.0", + "totalist": "^3.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "license": "MIT" + }, + "node_modules/sitemap": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.2.tgz", + "integrity": "sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==", + "license": "MIT", + 
"dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.2.4" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=12.0.0", + "npm": ">=5.6.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==", + "license": "MIT" + }, + "node_modules/skin-tone": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/skin-tone/-/skin-tone-2.0.0.tgz", + "integrity": "sha512-kUMbT1oBJCpgrnKoSr0o6wPtvRWT9W9UKvGLwfJYO2WuahZRHOpEyL1ckyMGgMWh0UdpmaoFqKKD29WTomNEGA==", + "license": "MIT", + "dependencies": { + "unicode-emoji-modifier-base": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/snake-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/snake-case/-/snake-case-3.0.4.tgz", + "integrity": "sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==", + "license": "MIT", + "dependencies": { + "dot-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/sockjs": { + "version": "0.3.24", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz", + "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==", + "license": "MIT", + "dependencies": { + "faye-websocket": "^0.11.3", + "uuid": "^8.3.2", + "websocket-driver": "^0.7.4" + } + }, + "node_modules/sort-css-media-queries": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.2.0.tgz", + "integrity": "sha512-0xtkGhWCC9MGt/EzgnvbbbKhqWjl1+/rncmhTh5qCpbYguXh6S/qwePfv/JQ8jePXXmqingylxoC49pCkSPIbA==", + "license": "MIT", + "engines": { + "node": ">= 6.3.0" + } + }, + "node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">= 8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": 
{ + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/spdy": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "handle-thing": "^2.0.0", + "http-deceiver": "^1.2.7", + "select-hose": "^2.0.0", + "spdy-transport": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/spdy-transport": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz", + "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==", + "license": "MIT", + "dependencies": { + "debug": "^4.1.0", + "detect-node": "^2.0.4", + "hpack.js": "^2.1.6", + "obuf": "^1.1.2", + "readable-stream": "^3.0.6", + "wbuf": "^1.7.3" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "license": "BSD-3-Clause" + }, + "node_modules/srcset": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/srcset/-/srcset-4.0.0.tgz", + "integrity": "sha512-wvLeHgcVHKO8Sc/H/5lkGreJQVeYMm9rlmt8PuR1xE31rIuXhuzznUUqAt8MqLhB3MqJdFzlNAfpcWnxiFUcPw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/std-env": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", + "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", + "license": "MIT" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": 
"sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "license": "MIT", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stringify-object": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", + "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==", + "license": "BSD-2-Clause", + "dependencies": { + "get-own-enumerable-property-symbols": "^3.0.0", + "is-obj": "^1.0.1", + "is-regexp": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", + "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-to-js": { + "version": "1.1.17", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.17.tgz", + "integrity": "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA==", + "license": "MIT", + "dependencies": { + "style-to-object": "1.0.9" + } + }, + "node_modules/style-to-object": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.9.tgz", + "integrity": 
"sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw==", + "license": "MIT", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, + "node_modules/stylehacks": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-6.1.1.tgz", + "integrity": "sha512-gSTTEQ670cJNoaeIp9KX6lZmm8LJ3jPB5yJmX8Zq/wQxOsAFXV3qjWzHas3YYk1qesuVIyYWWUpZ0vSE/dTSGg==", + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.0", + "postcss-selector-parser": "^6.0.16" + }, + "engines": { + "node": "^14 || ^16 || >=18.0" + }, + "peerDependencies": { + "postcss": "^8.4.31" + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/svg-parser": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", + "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==", + "license": "MIT" + }, + "node_modules/svgo": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.3.2.tgz", + "integrity": "sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==", + "license": "MIT", + "dependencies": { + "@trysound/sax": "0.2.0", + "commander": "^7.2.0", + "css-select": "^5.1.0", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", + "csso": "^5.0.5", + "picocolors": "^1.0.0" + }, + "bin": { + "svgo": "bin/svgo" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/svgo" + } + }, + "node_modules/svgo/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/tapable": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.2.tgz", + "integrity": "sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/terser": { + "version": "5.43.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.43.1.tgz", + "integrity": "sha512-+6erLbBm0+LROX2sPXlUYx/ux5PyE9K/a92Wrt6oA+WDAoFTdpHE5tCYCI5PNzq2y8df4rA+QgHLJuR4jNymsg==", + "license": "BSD-2-Clause", + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.14.0", 
+ "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.14", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz", + "integrity": "sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==", + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "jest-worker": "^27.4.5", + "schema-utils": "^4.3.0", + "serialize-javascript": "^6.0.2", + "terser": "^5.31.1" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/terser-webpack-plugin/node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/terser-webpack-plugin/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "license": "MIT" + }, + "node_modules/thunky": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", + "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", + "license": "MIT" + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "license": "MIT" + }, + "node_modules/tiny-warning": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", + "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.1.tgz", + "integrity": "sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw==", + "license": "MIT" + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "license": "MIT", + "engines": { + 
"node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/totalist": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", + "integrity": "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "license": "MIT", + "engines": { + "node": ">=6.10" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/type-fest": { + "version": "2.19.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", + "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/type-is/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": 
"sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typedarray-to-buffer": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", + "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", + "license": "MIT", + "dependencies": { + "is-typedarray": "^1.0.0" + } + }, + "node_modules/typescript": { + "version": "5.9.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz", + "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==", + "devOptional": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.1.tgz", + "integrity": "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==", + "license": "MIT" + }, + "node_modules/undici": { + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.11.0.tgz", + "integrity": "sha512-heTSIac3iLhsmZhUCjyS3JQEkZELateufzZuBaVM5RHXdSBMb1LPMQf5x+FH7qjsZYDP0ttAc3nnVpUB+wYbOg==", + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, + "node_modules/undici-types": { + "version": "7.8.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz", + "integrity": "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw==", + "license": "MIT" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz", + "integrity": "sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-emoji-modifier-base": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unicode-emoji-modifier-base/-/unicode-emoji-modifier-base-1.0.0.tgz", + "integrity": "sha512-yLSH4py7oFH3oG/9K+XWrz1pSi3dfUrWEnInbxMfArOfc1+33BlGPQtLsOYwvdMy11AwUBetYuaRxSPqgkq+8g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "license": "MIT", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz", + "integrity": "sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unique-string": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-3.0.0.tgz", + "integrity": "sha512-VGXBUVwxKMBUznyffQweQABPRRW1vHZAbadFZud4pLFAqRGvv/96vafgjWFqzourzr8YonlQiPgH0YCJfawoGQ==", + "license": "MIT", + "dependencies": { + "crypto-random-string": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/update-notifier": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-6.0.2.tgz", + "integrity": "sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==", + "license": "BSD-2-Clause", + "dependencies": { + "boxen": "^7.0.0", + "chalk": "^5.0.1", + "configstore": "^6.0.0", + "has-yarn": "^3.0.0", + "import-lazy": "^4.0.0", + "is-ci": "^3.0.1", + "is-installed-globally": "^0.4.0", + "is-npm": "^6.0.0", + "is-yarn-global": "^0.4.0", + "latest-version": "^7.0.0", + "pupa": "^3.1.0", + "semver": "^7.3.7", + "semver-diff": "^4.0.0", + "xdg-basedir": "^5.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/update-notifier?sponsor=1" + } + }, + "node_modules/update-notifier/node_modules/boxen": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.1.1.tgz", + "integrity": "sha512-2hCgjEmP8YLWQ130n2FerGv7rYpfBmnmp9Uy2Le1vge6X3gZIfSmEzP5QTDElFxcvVcXlEn8Aq6MU/PZygIOog==", + "license": "MIT", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^7.0.1", + "chalk": "^5.2.0", + "cli-boxes": "^3.0.0", + "string-width": "^5.1.2", + "type-fest": "^2.13.0", + "widest-line": "^4.0.1", + "wrap-ansi": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/camelcase": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", + "integrity": 
"sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-notifier/node_modules/chalk": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", + "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/url-loader": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz", + "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==", + "license": "MIT", + "dependencies": { + "loader-utils": "^2.0.0", + "mime-types": "^2.1.27", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "file-loader": "*", + "webpack": "^4.0.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "file-loader": { + "optional": true + } + } + }, + "node_modules/url-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/url-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "license": "MIT", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/url-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "license": "MIT" + }, + "node_modules/url-loader/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/url-loader/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + 
"node_modules/url-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/utila": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz", + "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA==", + "license": "MIT" + }, + "node_modules/utility-types": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz", + "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/value-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", + "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==", + "license": "MIT" + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.2.tgz", + "integrity": "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw==", + "license": "MIT", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vscode-jsonrpc": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/vscode-jsonrpc/-/vscode-jsonrpc-8.2.0.tgz", + "integrity": "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/vscode-languageserver": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/vscode-languageserver/-/vscode-languageserver-9.0.1.tgz", + "integrity": "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==", + "license": "MIT", + "dependencies": { + "vscode-languageserver-protocol": "3.17.5" + }, + "bin": { + "installServerIntoExtension": "bin/installServerIntoExtension" + } + }, + "node_modules/vscode-languageserver-protocol": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-protocol/-/vscode-languageserver-protocol-3.17.5.tgz", + "integrity": "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==", + "license": "MIT", + "dependencies": { + "vscode-jsonrpc": "8.2.0", + "vscode-languageserver-types": "3.17.5" + } + }, + "node_modules/vscode-languageserver-textdocument": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/vscode-languageserver-textdocument/-/vscode-languageserver-textdocument-1.0.12.tgz", + "integrity": "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==", + "license": "MIT" + }, + "node_modules/vscode-languageserver-types": { + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", + "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==", + "license": "MIT" + }, + "node_modules/vscode-uri": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.0.8.tgz", + "integrity": "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==", + "license": "MIT" + }, + "node_modules/watchpack": { + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.4.tgz", + "integrity": "sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==", + "license": "MIT", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wbuf": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz", + "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==", + "license": "MIT", + "dependencies": { + "minimalistic-assert": "^1.0.0" + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "license": "MIT", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpack": { + "version": "5.99.9", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.99.9.tgz", + "integrity": "sha512-brOPwM3JnmOa+7kd3NsmOUOwbDAj8FT9xDsG3IW0MgbN9yZV7Oi/s/+MNQ/EcSMqw7qfoRyXPoeEWT8zLVdVGg==", + "license": "MIT", + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.1", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^4.3.2", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.11", + "watchpack": "^2.4.1", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-bundle-analyzer": { + "version": "4.10.2", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "license": "MIT", + "dependencies": { + "@discoveryjs/json-ext": "0.5.7", + "acorn": "^8.0.4", + "acorn-walk": "^8.0.0", + "commander": "^7.2.0", + "debounce": "^1.2.1", + "escape-string-regexp": "^4.0.0", + "gzip-size": "^6.0.0", + "html-escaper": "^2.0.2", + "opener": "^1.5.2", + "picocolors": "^1.0.0", + "sirv": "^2.0.3", + "ws": "^7.3.1" + }, + "bin": { + "webpack-bundle-analyzer": "lib/bin/analyzer.js" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/webpack-bundle-analyzer/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/webpack-dev-middleware": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "license": "MIT", + "dependencies": { + "colorette": "^2.0.10", + "memfs": "^3.4.3", + "mime-types": "^2.1.31", + "range-parser": "^1.2.1", + "schema-utils": "^4.0.0" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/mime-types": { + "version": "2.1.35", + "resolved": 
"https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-middleware/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack-dev-server": { + "version": "4.15.2", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "license": "MIT", + "dependencies": { + "@types/bonjour": "^3.5.9", + "@types/connect-history-api-fallback": "^1.3.5", + "@types/express": "^4.17.13", + "@types/serve-index": "^1.9.1", + "@types/serve-static": "^1.13.10", + "@types/sockjs": "^0.3.33", + "@types/ws": "^8.5.5", + "ansi-html-community": "^0.0.8", + "bonjour-service": "^1.0.11", + "chokidar": "^3.5.3", + "colorette": "^2.0.10", + "compression": "^1.7.4", + "connect-history-api-fallback": "^2.0.0", + "default-gateway": "^6.0.3", + "express": "^4.17.3", + "graceful-fs": "^4.2.6", + "html-entities": "^2.3.2", + "http-proxy-middleware": "^2.0.3", + "ipaddr.js": "^2.0.1", + "launch-editor": "^2.6.0", + "open": "^8.0.9", + "p-retry": "^4.5.0", + "rimraf": "^3.0.2", + "schema-utils": "^4.0.0", + "selfsigned": "^2.1.1", + "serve-index": "^1.9.1", + "sockjs": "^0.3.24", + "spdy": "^4.0.2", + "webpack-dev-middleware": "^5.3.4", + "ws": "^8.13.0" + }, + "bin": { + "webpack-dev-server": "bin/webpack-dev-server.js" + }, + "engines": { + "node": ">= 12.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.37.0 || ^5.0.0" + }, + "peerDependenciesMeta": { + "webpack": { + "optional": true + }, + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-dev-server/node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/webpack-merge": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-6.0.1.tgz", + "integrity": "sha512-hXXvrjtx2PLYx4qruKl+kyRSLc52V+cCvMxRjmKwoA+CBbbF5GfIBtR6kCvl0fYGqTUPKB+1ktVmTHqMOzgCBg==", + "license": "MIT", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.1" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.3.3.tgz", + "integrity": "sha512-yd1RBzSGanHkitROoPFd6qsrxt+oFhg/129YzheDGqeustzX0vTZJZsSsQjVQC4yzBQ56K55XU8gaNCtIzOnTg==", + "license": "MIT", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/webpack/node_modules/mime-db": { + "version": "1.52.0", + 
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpack/node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/webpackbar": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-6.0.1.tgz", + "integrity": "sha512-TnErZpmuKdwWBdMoexjio3KKX6ZtoKHRVvLIU0A47R0VVBDtx3ZyOJDktgYixhoJokZTYTt1Z37OkO9pnGJa9Q==", + "license": "MIT", + "dependencies": { + "ansi-escapes": "^4.3.2", + "chalk": "^4.1.2", + "consola": "^3.2.3", + "figures": "^3.2.0", + "markdown-table": "^2.0.0", + "pretty-time": "^1.1.0", + "std-env": "^3.7.0", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "webpack": "3 || 4 || 5" + } + }, + "node_modules/webpackbar/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/webpackbar/node_modules/markdown-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-2.0.0.tgz", + "integrity": "sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A==", + "license": "MIT", + "dependencies": { + "repeat-string": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/webpackbar/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/webpackbar/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/websocket-driver": { + "version": "0.7.4", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", + "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==", + "license": "Apache-2.0", + "dependencies": { + "http-parser-js": ">=0.5.1", + "safe-buffer": ">=5.1.0", + "websocket-extensions": ">=0.1.1" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/websocket-extensions": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": 
"sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-encoding/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/whatwg-mimetype": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/widest-line": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", + "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", + "license": "MIT", + "dependencies": { + "string-width": "^5.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==", + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "license": "MIT", + "engines": { + "node": 
">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "is-typedarray": "^1.0.0", + "signal-exit": "^3.0.2", + "typedarray-to-buffer": "^3.1.5" + } + }, + "node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "license": "MIT", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xdg-basedir": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", + "integrity": "sha512-GCPAHLvrIH13+c0SuacwvRYj2SxJXQ4kaVTT5xgL3kPrz56XxkF21IGhjSE1+W0aw7gpBWRGXLCPnPby6lSpmQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/xml-js": { + "version": "1.6.11", + "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz", + "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==", + "license": "MIT", + "dependencies": { + "sax": "^1.2.4" + }, + "bin": { + "xml-js": "bin/cli.js" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.2.1.tgz", + "integrity": "sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==", + "license": "MIT", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/site/package.json b/site/package.json new file mode 100644 index 00000000..6472c80a --- 
/dev/null +++ b/site/package.json @@ -0,0 +1,50 @@ +{ + "name": "site", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "version": "node scripts/cut-version.js", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "typecheck": "tsc" + }, + "dependencies": { + "@docusaurus/core": "^3.8.1", + "@docusaurus/plugin-client-redirects": "^3.8.1", + "@docusaurus/preset-classic": "^3.8.1", + "@docusaurus/remark-plugin-npm2yarn": "^3.8.1", + "@docusaurus/theme-mermaid": "^3.8.1", + "@easyops-cn/docusaurus-search-local": "^0.52.1", + "@mdx-js/react": "^3.0.0", + "clsx": "^2.0.0", + "prism-react-renderer": "^2.3.0", + "react": "^19.1.1", + "react-dom": "^19.1.1" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "^3.8.1", + "@docusaurus/tsconfig": "^3.8.1", + "@docusaurus/types": "^3.8.1", + "typescript": "^5.9.2" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 3 chrome version", + "last 3 firefox version", + "last 5 safari version" + ] + }, + "engines": { + "node": ">=18.0" + } +} diff --git a/site/redirects.ts b/site/redirects.ts new file mode 100644 index 00000000..5dac947a --- /dev/null +++ b/site/redirects.ts @@ -0,0 +1,157 @@ +// Redirect configuration for Docusaurus client-side redirects +// Based on GitBook .gitbook.yaml redirects + +type RedirectRule = { + to: string; + from: string | string[]; +}; + +// Function to generate redirects with the appropriate base path +export function generateRedirects(basePath: string): RedirectRule[] { + // Helper to add base path to a route + const withBase = (path: string) => `${basePath}${path}`; + + return [ + // Operations API + { from: withBase('/developers/operations-api/utilities'), to: withBase('/developers/operations-api/system-operations') }, + + // Installation paths + { from: withBase('/install-harperdb'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/install-harperdb/linux'), to: withBase('/deployments/install-harper/linux') }, + { from: withBase('/install-harperdb/other'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/install-harperdb/docker'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/install-harperdb/mac'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/install-harperdb/windows'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/install-harperdb/linux-quickstart'), to: withBase('/deployments/install-harper/linux') }, + { from: withBase('/install-harperdb/offline'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/install-harperdb/node-ver-requirement'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/deployments/install-harperdb'), to: withBase('/deployments/install-harper/') }, + { from: withBase('/deployments/install-harperdb/linux'), to: withBase('/deployments/install-harper/linux') }, + + // Harper Studio (old HarperDB Studio paths) + { from: withBase('/harperdb-studio'), to: withBase('/administration/harper-studio/') }, + { from: withBase('/harperdb-studio/create-account'), to: withBase('/administration/harper-studio/create-account') }, + { from: withBase('/harperdb-studio/login-password-reset'), to: withBase('/administration/harper-studio/login-password-reset') }, + { from: [withBase('/harperdb-studio/resources'), 
withBase('/administration/harper-studio/resources')], to: withBase('/administration/harper-studio/') }, + { from: withBase('/harperdb-studio/organizations'), to: withBase('/administration/harper-studio/organizations') }, + { from: withBase('/harperdb-studio/instances'), to: withBase('/administration/harper-studio/instances') }, + { from: withBase('/harperdb-studio/query-instance-data'), to: withBase('/administration/harper-studio/query-instance-data') }, + { from: withBase('/harperdb-studio/manage-schemas-browse-data'), to: withBase('/administration/harper-studio/manage-databases-browse-data') }, + { from: [withBase('/harperdb-studio/manage-charts'), withBase('/administration/harper-studio/manage-charts')], to: withBase('/administration/harper-studio/query-instance-data') }, + { from: withBase('/harperdb-studio/manage-clustering'), to: withBase('/administration/harper-studio/manage-replication') }, + { from: withBase('/harperdb-studio/manage-instance-users'), to: withBase('/administration/harper-studio/manage-instance-users') }, + { from: withBase('/harperdb-studio/manage-instance-roles'), to: withBase('/administration/harper-studio/manage-instance-users') }, + { from: withBase('/harperdb-studio/manage-functions'), to: withBase('/administration/harper-studio/manage-applications') }, + { from: withBase('/harperdb-studio/instance-metrics'), to: withBase('/administration/harper-studio/instance-metrics') }, + { from: withBase('/harperdb-studio/instance-configuration'), to: withBase('/administration/harper-studio/instance-configuration') }, + { from: withBase('/harperdb-studio/enable-mixed-content'), to: withBase('/administration/harper-studio/enable-mixed-content') }, + + // Harper Cloud (old HarperDB Cloud paths) + { from: withBase('/harperdb-cloud'), to: withBase('/deployments/harper-cloud/') }, + + // Security + { from: withBase('/security'), to: withBase('/developers/security/') }, + { from: withBase('/security/jwt-auth'), to: withBase('/developers/security/jwt-auth') }, + { from: withBase('/security/basic-auth'), to: withBase('/developers/security/basic-auth') }, + { from: withBase('/security/configuration'), to: withBase('/developers/security/configuration') }, + { from: withBase('/security/users-and-roles'), to: withBase('/developers/security/users-and-roles') }, + + // Custom Functions → Applications + { from: withBase('/custom-functions'), to: withBase('/developers/applications/') }, + { from: withBase('/custom-functions/define-routes'), to: withBase('/developers/applications/define-routes') }, + { from: [withBase('/custom-functions/using-npm-git'), withBase('/developers/custom-functions/create-project')], to: withBase('/developers/applications/') }, + { from: withBase('/custom-functions/custom-functions-operations'), to: withBase('/developers/operations-api/') }, + { from: withBase('/custom-functions/debugging-custom-function'), to: withBase('/developers/applications/debugging') }, + { from: withBase('/custom-functions/example-projects'), to: withBase('/developers/applications/example-projects') }, + + // Add-ons and SDKs + { from: withBase('/add-ons-and-sdks'), to: withBase('/developers/applications/') }, + { from: withBase('/add-ons-and-sdks/google-data-studio'), to: withBase('/developers/miscellaneous/google-data-studio') }, + + // SQL Guide + { from: withBase('/sql-guide'), to: withBase('/developers/sql-guide/') }, + + // CLI + { from: withBase('/harperdb-cli'), to: withBase('/deployments/harper-cli') }, + { from: withBase('/deployments/harperdb-cli'), to: 
withBase('/deployments/harper-cli') }, + + // Top-level paths + { from: withBase('/configuration'), to: withBase('/deployments/configuration') }, + { from: withBase('/logging'), to: withBase('/administration/logging/standard-logging') }, + { from: withBase('/transaction-logging'), to: withBase('/administration/logging/transaction-logging') }, + { from: withBase('/audit-logging'), to: withBase('/administration/logging/audit-logging') }, + { from: withBase('/jobs'), to: withBase('/administration/jobs') }, + { from: withBase('/upgrade-hdb-instance'), to: withBase('/deployments/upgrade-hdb-instance') }, + { from: withBase('/reference'), to: withBase('/technical-details/reference/') }, + { from: withBase('/operations-api'), to: withBase('/developers/operations-api/') }, + { from: withBase('/rest'), to: withBase('/developers/rest') }, + { from: withBase('/api'), to: withBase('/developers/operations-api/') }, + + // File rename redirect + { from: withBase('/administration/logging/logging'), to: withBase('/administration/logging/standard-logging') }, + ]; +} + +// For backward compatibility, export a default set with empty base path +export const redirects = generateRedirects(''); + +// Function to create wildcard redirects for moved sections +// This handles dynamic redirects for paths not explicitly defined in the main redirect list +export function createRedirects(existingPath: string, basePath: string = ''): string[] | undefined { + const redirects: string[] = []; + + // Only create wildcard redirects for paths that aren't already explicitly defined + // Check if this is a path we handle with wildcard redirects + + // Harper Studio - only for subpaths not already defined + if (existingPath.startsWith(`${basePath}/administration/harper-studio/`)) { + const subpath = existingPath.replace(`${basePath}/administration/harper-studio/`, ''); + // Skip paths that are already explicitly redirected + const explicitStudioPaths = [ + 'create-account', 'login-password-reset', 'organizations', 'instances', + 'query-instance-data', 'manage-databases-browse-data', 'manage-replication', + 'manage-instance-users', 'manage-applications', 'instance-metrics', + 'instance-configuration', 'enable-mixed-content' + ]; + if (subpath && !explicitStudioPaths.includes(subpath)) { + redirects.push(`${basePath}/administration/harperdb-studio/${subpath}`); + } + } + + // Harper Cloud - only for subpaths not already defined + if (existingPath.startsWith(`${basePath}/deployments/harper-cloud/`)) { + const subpath = existingPath.replace(`${basePath}/deployments/harper-cloud/`, ''); + // The main harper-cloud redirect is explicit, only handle other subpaths + if (subpath) { + redirects.push(`${basePath}/deployments/harperdb-cloud/${subpath}`); + } + } + + // Install Harper - only for subpaths not already defined + if (existingPath.startsWith(`${basePath}/deployments/install-harper/`)) { + const subpath = existingPath.replace(`${basePath}/deployments/install-harper/`, ''); + // Skip 'linux' as it's explicitly defined + if (subpath && subpath !== 'linux') { + redirects.push(`${basePath}/deployments/install-harperdb/${subpath}`); + } + } + + // Custom Functions - handle subpaths + if (existingPath.startsWith(`${basePath}/developers/custom-functions/`)) { + const subpath = existingPath.replace(`${basePath}/developers/custom-functions/`, ''); + // Skip paths that are explicitly defined + const explicitCustomPaths = ['define-routes', 'debugging-custom-function', 'example-projects']; + if (subpath && 
!explicitCustomPaths.includes(subpath)) { + redirects.push(`${basePath}/custom-functions/${subpath}`); + } + } + + // Don't create wildcard redirects for these as they're all explicitly defined: + // - /developers/security/* (all subpaths are explicit) + // - /deployments/harper-cli (explicit) + // - /developers/sql-guide/* (has explicit redirect) + // - /developers/operations-api/* (has explicit redirects) + // - /technical-details/reference/* (has explicit redirect) + + return redirects.length > 0 ? redirects : undefined; +} \ No newline at end of file diff --git a/site/scripts/cut-version.js b/site/scripts/cut-version.js new file mode 100644 index 00000000..b736bc43 --- /dev/null +++ b/site/scripts/cut-version.js @@ -0,0 +1,117 @@ +#!/usr/bin/env node + +/** + * Script to cut a new version from the repository's /docs directory + * This is used for creating new versions (4.7+) after the GitBook migration + * + * Usage: npm run version <version> + * Example: npm run version 4.7 + */ + +const fs = require('node:fs'); +const path = require('node:path'); +const { execSync } = require('node:child_process'); + +const SCRIPT_DIR = __dirname; +const SITE_DIR = path.dirname(SCRIPT_DIR); +const REPO_ROOT = path.dirname(SITE_DIR); +const REPO_DOCS = path.join(REPO_ROOT, 'docs'); +const SITE_DOCS = path.join(SITE_DIR, 'docs'); + +function copyDirectory(src, dest) { + // Create destination directory + fs.mkdirSync(dest, { recursive: true }); + + // Read all items in source directory + const items = fs.readdirSync(src, { withFileTypes: true }); + + for (const item of items) { + const srcPath = path.join(src, item.name); + const destPath = path.join(dest, item.name); + + if (item.isDirectory()) { + // Recursively copy subdirectories + copyDirectory(srcPath, destPath); + } else { + // Copy file + fs.copyFileSync(srcPath, destPath); + } + } +} + +function removeDirectory(dir) { + if (fs.existsSync(dir)) { + fs.rmSync(dir, { recursive: true, force: true }); + } +} + +function main() { + const version = process.argv[2]; + + if (!version) { + console.error('Usage: npm run version <version>'); + console.error('Example: npm run version 4.7'); + process.exit(1); + } + + // Validate version format + if (!/^\d+\.\d+$/.test(version)) { + console.error(`Error: Invalid version format "${version}". 
Expected format: X.Y (e.g., 4.7)`); + process.exit(1); + } + + console.log(`\nCutting version ${version} from repository docs...`); + + // Check if repo docs exist + if (!fs.existsSync(REPO_DOCS)) { + console.error(`Error: Repository docs not found at ${REPO_DOCS}`); + console.error('After migration, the repository /docs directory should contain vNext documentation.'); + process.exit(1); + } + + // Remove existing site/docs if it exists (it's just a build-time copy) + if (fs.existsSync(SITE_DOCS)) { + console.log('Removing existing site/docs (build-time copy)...'); + removeDirectory(SITE_DOCS); + } + + try { + // Copy repo docs to site docs + console.log('Copying repository docs to site/docs...'); + copyDirectory(REPO_DOCS, SITE_DOCS); + + // Run Docusaurus version command + console.log(`\nRunning Docusaurus version command for ${version}...`); + execSync(`npm run docusaurus docs:version ${version}`, { + cwd: SITE_DIR, + stdio: 'inherit' + }); + + console.log(`\n✅ Successfully created version ${version}`); + console.log(` - Versioned docs created at: versioned_docs/version-${version}/`); + console.log(` - Version added to versions.json`); + + // Clean up - remove the temporary site/docs (it's in .gitignore anyway) + console.log('\nCleaning up temporary site/docs...'); + removeDirectory(SITE_DOCS); + + console.log('\n🎉 Version creation complete!'); + console.log('\nNext steps:'); + console.log('1. Create a PR with the new versioned docs and updated versions.json'); + console.log('2. Site will deploy automatically when PR is merged'); + console.log(`\nNote: Version ${version} is now the latest and will be synced to site/docs during build`); + + } catch (error) { + console.error('\n❌ Error creating version:', error.message || error); + + // Clean up on error + if (fs.existsSync(SITE_DOCS)) { + console.log('Cleaning up temporary site/docs...'); + removeDirectory(SITE_DOCS); + } + + process.exit(1); + } +} + +main(); \ No newline at end of file diff --git a/site/sidebars.ts b/site/sidebars.ts new file mode 100644 index 00000000..80c935b5 --- /dev/null +++ b/site/sidebars.ts @@ -0,0 +1,44 @@ +import type {SidebarsConfig} from '@docusaurus/plugin-content-docs'; + +const sidebars: SidebarsConfig = { + docsSidebar: [ + { + type: 'doc', + id: 'index', + label: 'Harper Docs', + }, + { + type: 'category', + label: 'Getting Started', + items: [{type: 'autogenerated', dirName: 'getting-started'}], + }, + { + type: 'category', + label: 'Developers', + link: { + type: 'generated-index', + title: 'Developer Documentation', + description: 'Comprehensive guides and references for building applications with HarperDB', + keywords: ['developers', 'api', 'applications'] + }, + items: [{type: 'autogenerated', dirName: 'developers'}], + }, + { + type: 'category', + label: 'Administration', + items: [{type: 'autogenerated', dirName: 'administration'}], + }, + { + type: 'category', + label: 'Deployments', + items: [{type: 'autogenerated', dirName: 'deployments'}], + }, + { + type: 'category', + label: 'Technical Details', + items: [{type: 'autogenerated', dirName: 'technical-details'}], + }, + ], +}; + +export default sidebars; \ No newline at end of file diff --git a/site/src/css/custom.css b/site/src/css/custom.css new file mode 100644 index 00000000..6d134567 --- /dev/null +++ b/site/src/css/custom.css @@ -0,0 +1,30 @@ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. 
+ */ + +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary: #403b8a; + --ifm-color-primary-dark: #37347a; + --ifm-color-primary-darker: #2f2c6a; + --ifm-color-primary-darkest: #27245a; + --ifm-color-primary-light: #4a46a0; + --ifm-color-primary-lighter: #5551b5; + --ifm-color-primary-lightest: #605dcb; + --ifm-code-font-size: 95%; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1); +} + +/* For readability concerns, you should choose a lighter palette in dark mode. */ +[data-theme='dark'] { + --ifm-color-primary: #67f3cd; + --ifm-color-primary-dark: #4fd1b2; + --ifm-color-primary-darker: #3fa48e; + --ifm-color-primary-darkest: #2f786a; + --ifm-color-primary-light: #85f6da; + --ifm-color-primary-lighter: #a3f8e4; + --ifm-color-primary-lightest: #c1faee; + --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3); +} diff --git a/site/src/pages/index.module.css b/site/src/pages/index.module.css new file mode 100644 index 00000000..9f71a5da --- /dev/null +++ b/site/src/pages/index.module.css @@ -0,0 +1,23 @@ +/** + * CSS files with the .module.css suffix will be treated as CSS modules + * and scoped locally. + */ + +.heroBanner { + padding: 4rem 0; + text-align: center; + position: relative; + overflow: hidden; +} + +@media screen and (max-width: 996px) { + .heroBanner { + padding: 2rem; + } +} + +.buttons { + display: flex; + align-items: center; + justify-content: center; +} diff --git a/site/src/pages/index.tsx b/site/src/pages/index.tsx new file mode 100644 index 00000000..71c03967 --- /dev/null +++ b/site/src/pages/index.tsx @@ -0,0 +1,12 @@ +import React from 'react'; +import { Redirect } from '@docusaurus/router'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; + +export default function Home(): JSX.Element { + const { siteConfig } = useDocusaurusContext(); + // Get the routeBasePath from the docs preset config + const docsPath = siteConfig?.presets?.[0]?.[1]?.docs?.routeBasePath || '/docs'; + + // Redirect to the configured docs path + return <Redirect to={docsPath} />; +} \ No newline at end of file diff --git a/site/static/.nojekyll b/site/static/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/site/static/img/HarperDogLogo.svg b/site/static/img/HarperDogLogo.svg new file mode 100644 index 00000000..78cdc822 --- /dev/null +++ b/site/static/img/HarperDogLogo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/site/static/img/HarperOpenGraph.jpg b/site/static/img/HarperOpenGraph.jpg new file mode 100644 index 00000000..f15cc15a Binary files /dev/null and b/site/static/img/HarperOpenGraph.jpg differ diff --git a/site/static/img/HarperPrimaryBlk.svg b/site/static/img/HarperPrimaryBlk.svg new file mode 100644 index 00000000..31e3d90e --- /dev/null +++ b/site/static/img/HarperPrimaryBlk.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/site/static/img/HarperPrimaryWht.svg b/site/static/img/HarperPrimaryWht.svg new file mode 100644 index 00000000..f2bb1f66 --- /dev/null +++ b/site/static/img/HarperPrimaryWht.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/site/static/img/v4.1/ave-age-per-owner-ex.png b/site/static/img/v4.1/ave-age-per-owner-ex.png new file mode 100644 index 00000000..8e39d22c Binary files /dev/null and b/site/static/img/v4.1/ave-age-per-owner-ex.png differ diff --git a/site/static/img/v4.1/clustering/figure1.png b/site/static/img/v4.1/clustering/figure1.png new file mode 100644 index 00000000..00c64580 Binary files /dev/null and b/site/static/img/v4.1/clustering/figure1.png 
differ diff --git a/site/static/img/v4.1/clustering/figure2.png b/site/static/img/v4.1/clustering/figure2.png new file mode 100644 index 00000000..688a3ba0 Binary files /dev/null and b/site/static/img/v4.1/clustering/figure2.png differ diff --git a/site/static/img/v4.1/clustering/figure3.png b/site/static/img/v4.1/clustering/figure3.png new file mode 100644 index 00000000..b21712e9 Binary files /dev/null and b/site/static/img/v4.1/clustering/figure3.png differ diff --git a/site/static/img/v4.1/clustering/figure4.png b/site/static/img/v4.1/clustering/figure4.png new file mode 100644 index 00000000..f578f632 Binary files /dev/null and b/site/static/img/v4.1/clustering/figure4.png differ diff --git a/site/static/img/v4.1/clustering/figure5.png b/site/static/img/v4.1/clustering/figure5.png new file mode 100644 index 00000000..f19e4de0 Binary files /dev/null and b/site/static/img/v4.1/clustering/figure5.png differ diff --git a/site/static/img/v4.1/clustering/figure6.png b/site/static/img/v4.1/clustering/figure6.png new file mode 100644 index 00000000..eff93613 Binary files /dev/null and b/site/static/img/v4.1/clustering/figure6.png differ diff --git a/site/static/img/v4.1/dogs/alby.webp b/site/static/img/v4.1/dogs/alby.webp new file mode 100644 index 00000000..bf73645a Binary files /dev/null and b/site/static/img/v4.1/dogs/alby.webp differ diff --git a/site/static/img/v4.1/dogs/monkey.webp b/site/static/img/v4.1/dogs/monkey.webp new file mode 100644 index 00000000..0a383707 Binary files /dev/null and b/site/static/img/v4.1/dogs/monkey.webp differ diff --git a/site/static/img/v4.1/dogs/penny.webp b/site/static/img/v4.1/dogs/penny.webp new file mode 100644 index 00000000..7f7dc546 Binary files /dev/null and b/site/static/img/v4.1/dogs/penny.webp differ diff --git a/site/static/img/v4.1/dogs/tucker.png b/site/static/img/v4.1/dogs/tucker.png new file mode 100644 index 00000000..4efd8671 Binary files /dev/null and b/site/static/img/v4.1/dogs/tucker.png differ diff --git a/site/static/img/v4.1/reference/HarperDB-3.0-Storage-Algorithm.png.webp b/site/static/img/v4.1/reference/HarperDB-3.0-Storage-Algorithm.png.webp new file mode 100644 index 00000000..28118e00 Binary files /dev/null and b/site/static/img/v4.1/reference/HarperDB-3.0-Storage-Algorithm.png.webp differ diff --git a/site/static/img/v4.1/reference/dynamic_schema_2_create_table.png.webp b/site/static/img/v4.1/reference/dynamic_schema_2_create_table.png.webp new file mode 100644 index 00000000..b3cdee75 Binary files /dev/null and b/site/static/img/v4.1/reference/dynamic_schema_2_create_table.png.webp differ diff --git a/site/static/img/v4.1/reference/dynamic_schema_3_insert_record.png.webp b/site/static/img/v4.1/reference/dynamic_schema_3_insert_record.png.webp new file mode 100644 index 00000000..4f79c0f4 Binary files /dev/null and b/site/static/img/v4.1/reference/dynamic_schema_3_insert_record.png.webp differ diff --git a/site/static/img/v4.1/reference/dynamic_schema_4_insert_additional_record.png.webp b/site/static/img/v4.1/reference/dynamic_schema_4_insert_additional_record.png.webp new file mode 100644 index 00000000..2caa7e75 Binary files /dev/null and b/site/static/img/v4.1/reference/dynamic_schema_4_insert_additional_record.png.webp differ diff --git a/site/static/img/v4.1/reference/dynamic_schema_5_update_existing_record.png.webp b/site/static/img/v4.1/reference/dynamic_schema_5_update_existing_record.png.webp new file mode 100644 index 00000000..041a71d7 Binary files /dev/null and 
b/site/static/img/v4.1/reference/dynamic_schema_5_update_existing_record.png.webp differ diff --git a/site/static/img/v4.1/reference/dynamic_schema_6_query_table_with_sql.png.webp b/site/static/img/v4.1/reference/dynamic_schema_6_query_table_with_sql.png.webp new file mode 100644 index 00000000..95dac39c Binary files /dev/null and b/site/static/img/v4.1/reference/dynamic_schema_6_query_table_with_sql.png.webp differ diff --git a/site/static/img/v4.2/ave-age-per-owner-ex.png b/site/static/img/v4.2/ave-age-per-owner-ex.png new file mode 100644 index 00000000..8e39d22c Binary files /dev/null and b/site/static/img/v4.2/ave-age-per-owner-ex.png differ diff --git a/site/static/img/v4.2/clustering/figure1.png b/site/static/img/v4.2/clustering/figure1.png new file mode 100644 index 00000000..00c64580 Binary files /dev/null and b/site/static/img/v4.2/clustering/figure1.png differ diff --git a/site/static/img/v4.2/clustering/figure2.png b/site/static/img/v4.2/clustering/figure2.png new file mode 100644 index 00000000..688a3ba0 Binary files /dev/null and b/site/static/img/v4.2/clustering/figure2.png differ diff --git a/site/static/img/v4.2/clustering/figure3.png b/site/static/img/v4.2/clustering/figure3.png new file mode 100644 index 00000000..b21712e9 Binary files /dev/null and b/site/static/img/v4.2/clustering/figure3.png differ diff --git a/site/static/img/v4.2/clustering/figure4.png b/site/static/img/v4.2/clustering/figure4.png new file mode 100644 index 00000000..f578f632 Binary files /dev/null and b/site/static/img/v4.2/clustering/figure4.png differ diff --git a/site/static/img/v4.2/clustering/figure5.png b/site/static/img/v4.2/clustering/figure5.png new file mode 100644 index 00000000..f19e4de0 Binary files /dev/null and b/site/static/img/v4.2/clustering/figure5.png differ diff --git a/site/static/img/v4.2/clustering/figure6.png b/site/static/img/v4.2/clustering/figure6.png new file mode 100644 index 00000000..eff93613 Binary files /dev/null and b/site/static/img/v4.2/clustering/figure6.png differ diff --git a/site/static/img/v4.2/dogs/alby.webp b/site/static/img/v4.2/dogs/alby.webp new file mode 100644 index 00000000..bf73645a Binary files /dev/null and b/site/static/img/v4.2/dogs/alby.webp differ diff --git a/site/static/img/v4.2/dogs/monkey.webp b/site/static/img/v4.2/dogs/monkey.webp new file mode 100644 index 00000000..0a383707 Binary files /dev/null and b/site/static/img/v4.2/dogs/monkey.webp differ diff --git a/site/static/img/v4.2/dogs/penny.webp b/site/static/img/v4.2/dogs/penny.webp new file mode 100644 index 00000000..7f7dc546 Binary files /dev/null and b/site/static/img/v4.2/dogs/penny.webp differ diff --git a/site/static/img/v4.2/dogs/tucker.png b/site/static/img/v4.2/dogs/tucker.png new file mode 100644 index 00000000..4efd8671 Binary files /dev/null and b/site/static/img/v4.2/dogs/tucker.png differ diff --git a/site/static/img/v4.2/reference/HarperDB-3.0-Storage-Algorithm.png.webp b/site/static/img/v4.2/reference/HarperDB-3.0-Storage-Algorithm.png.webp new file mode 100644 index 00000000..28118e00 Binary files /dev/null and b/site/static/img/v4.2/reference/HarperDB-3.0-Storage-Algorithm.png.webp differ diff --git a/site/static/img/v4.2/reference/dynamic_schema_2_create_table.png.webp b/site/static/img/v4.2/reference/dynamic_schema_2_create_table.png.webp new file mode 100644 index 00000000..b3cdee75 Binary files /dev/null and b/site/static/img/v4.2/reference/dynamic_schema_2_create_table.png.webp differ diff --git 
a/site/static/img/v4.2/reference/dynamic_schema_3_insert_record.png.webp b/site/static/img/v4.2/reference/dynamic_schema_3_insert_record.png.webp new file mode 100644 index 00000000..4f79c0f4 Binary files /dev/null and b/site/static/img/v4.2/reference/dynamic_schema_3_insert_record.png.webp differ diff --git a/site/static/img/v4.2/reference/dynamic_schema_4_insert_additional_record.png.webp b/site/static/img/v4.2/reference/dynamic_schema_4_insert_additional_record.png.webp new file mode 100644 index 00000000..2caa7e75 Binary files /dev/null and b/site/static/img/v4.2/reference/dynamic_schema_4_insert_additional_record.png.webp differ diff --git a/site/static/img/v4.2/reference/dynamic_schema_5_update_existing_record.png.webp b/site/static/img/v4.2/reference/dynamic_schema_5_update_existing_record.png.webp new file mode 100644 index 00000000..041a71d7 Binary files /dev/null and b/site/static/img/v4.2/reference/dynamic_schema_5_update_existing_record.png.webp differ diff --git a/site/static/img/v4.2/reference/dynamic_schema_6_query_table_with_sql.png.webp b/site/static/img/v4.2/reference/dynamic_schema_6_query_table_with_sql.png.webp new file mode 100644 index 00000000..95dac39c Binary files /dev/null and b/site/static/img/v4.2/reference/dynamic_schema_6_query_table_with_sql.png.webp differ diff --git a/site/static/img/v4.3/ave-age-per-owner-ex.png b/site/static/img/v4.3/ave-age-per-owner-ex.png new file mode 100644 index 00000000..8e39d22c Binary files /dev/null and b/site/static/img/v4.3/ave-age-per-owner-ex.png differ diff --git a/site/static/img/v4.3/clustering/figure1.png b/site/static/img/v4.3/clustering/figure1.png new file mode 100644 index 00000000..00c64580 Binary files /dev/null and b/site/static/img/v4.3/clustering/figure1.png differ diff --git a/site/static/img/v4.3/clustering/figure2.png b/site/static/img/v4.3/clustering/figure2.png new file mode 100644 index 00000000..688a3ba0 Binary files /dev/null and b/site/static/img/v4.3/clustering/figure2.png differ diff --git a/site/static/img/v4.3/clustering/figure3.png b/site/static/img/v4.3/clustering/figure3.png new file mode 100644 index 00000000..b21712e9 Binary files /dev/null and b/site/static/img/v4.3/clustering/figure3.png differ diff --git a/site/static/img/v4.3/clustering/figure4.png b/site/static/img/v4.3/clustering/figure4.png new file mode 100644 index 00000000..f578f632 Binary files /dev/null and b/site/static/img/v4.3/clustering/figure4.png differ diff --git a/site/static/img/v4.3/clustering/figure5.png b/site/static/img/v4.3/clustering/figure5.png new file mode 100644 index 00000000..f19e4de0 Binary files /dev/null and b/site/static/img/v4.3/clustering/figure5.png differ diff --git a/site/static/img/v4.3/clustering/figure6.png b/site/static/img/v4.3/clustering/figure6.png new file mode 100644 index 00000000..eff93613 Binary files /dev/null and b/site/static/img/v4.3/clustering/figure6.png differ diff --git a/site/static/img/v4.3/dogs/alby.webp b/site/static/img/v4.3/dogs/alby.webp new file mode 100644 index 00000000..bf73645a Binary files /dev/null and b/site/static/img/v4.3/dogs/alby.webp differ diff --git a/site/static/img/v4.3/dogs/monkey.webp b/site/static/img/v4.3/dogs/monkey.webp new file mode 100644 index 00000000..0a383707 Binary files /dev/null and b/site/static/img/v4.3/dogs/monkey.webp differ diff --git a/site/static/img/v4.3/dogs/penny.webp b/site/static/img/v4.3/dogs/penny.webp new file mode 100644 index 00000000..7f7dc546 Binary files /dev/null and b/site/static/img/v4.3/dogs/penny.webp differ diff --git 
a/site/static/img/v4.3/dogs/tucker.png b/site/static/img/v4.3/dogs/tucker.png new file mode 100644 index 00000000..4efd8671 Binary files /dev/null and b/site/static/img/v4.3/dogs/tucker.png differ diff --git a/site/static/img/v4.3/reference/HarperDB-3.0-Storage-Algorithm.png.webp b/site/static/img/v4.3/reference/HarperDB-3.0-Storage-Algorithm.png.webp new file mode 100644 index 00000000..28118e00 Binary files /dev/null and b/site/static/img/v4.3/reference/HarperDB-3.0-Storage-Algorithm.png.webp differ diff --git a/site/static/img/v4.3/reference/dynamic_schema_2_create_table.png.webp b/site/static/img/v4.3/reference/dynamic_schema_2_create_table.png.webp new file mode 100644 index 00000000..b3cdee75 Binary files /dev/null and b/site/static/img/v4.3/reference/dynamic_schema_2_create_table.png.webp differ diff --git a/site/static/img/v4.3/reference/dynamic_schema_3_insert_record.png.webp b/site/static/img/v4.3/reference/dynamic_schema_3_insert_record.png.webp new file mode 100644 index 00000000..4f79c0f4 Binary files /dev/null and b/site/static/img/v4.3/reference/dynamic_schema_3_insert_record.png.webp differ diff --git a/site/static/img/v4.3/reference/dynamic_schema_4_insert_additional_record.png.webp b/site/static/img/v4.3/reference/dynamic_schema_4_insert_additional_record.png.webp new file mode 100644 index 00000000..2caa7e75 Binary files /dev/null and b/site/static/img/v4.3/reference/dynamic_schema_4_insert_additional_record.png.webp differ diff --git a/site/static/img/v4.3/reference/dynamic_schema_5_update_existing_record.png.webp b/site/static/img/v4.3/reference/dynamic_schema_5_update_existing_record.png.webp new file mode 100644 index 00000000..041a71d7 Binary files /dev/null and b/site/static/img/v4.3/reference/dynamic_schema_5_update_existing_record.png.webp differ diff --git a/site/static/img/v4.3/reference/dynamic_schema_6_query_table_with_sql.png.webp b/site/static/img/v4.3/reference/dynamic_schema_6_query_table_with_sql.png.webp new file mode 100644 index 00000000..95dac39c Binary files /dev/null and b/site/static/img/v4.3/reference/dynamic_schema_6_query_table_with_sql.png.webp differ diff --git a/site/static/img/v4.4/ave-age-per-owner-ex.png b/site/static/img/v4.4/ave-age-per-owner-ex.png new file mode 100644 index 00000000..8e39d22c Binary files /dev/null and b/site/static/img/v4.4/ave-age-per-owner-ex.png differ diff --git a/site/static/img/v4.4/clustering/figure1.png b/site/static/img/v4.4/clustering/figure1.png new file mode 100644 index 00000000..00c64580 Binary files /dev/null and b/site/static/img/v4.4/clustering/figure1.png differ diff --git a/site/static/img/v4.4/clustering/figure2.png b/site/static/img/v4.4/clustering/figure2.png new file mode 100644 index 00000000..688a3ba0 Binary files /dev/null and b/site/static/img/v4.4/clustering/figure2.png differ diff --git a/site/static/img/v4.4/clustering/figure3.png b/site/static/img/v4.4/clustering/figure3.png new file mode 100644 index 00000000..b21712e9 Binary files /dev/null and b/site/static/img/v4.4/clustering/figure3.png differ diff --git a/site/static/img/v4.4/clustering/figure4.png b/site/static/img/v4.4/clustering/figure4.png new file mode 100644 index 00000000..f578f632 Binary files /dev/null and b/site/static/img/v4.4/clustering/figure4.png differ diff --git a/site/static/img/v4.4/clustering/figure5.png b/site/static/img/v4.4/clustering/figure5.png new file mode 100644 index 00000000..f19e4de0 Binary files /dev/null and b/site/static/img/v4.4/clustering/figure5.png differ diff --git 
a/site/static/img/v4.4/clustering/figure6.png b/site/static/img/v4.4/clustering/figure6.png new file mode 100644 index 00000000..eff93613 Binary files /dev/null and b/site/static/img/v4.4/clustering/figure6.png differ diff --git a/site/static/img/v4.4/dogs/alby.webp b/site/static/img/v4.4/dogs/alby.webp new file mode 100644 index 00000000..bf73645a Binary files /dev/null and b/site/static/img/v4.4/dogs/alby.webp differ diff --git a/site/static/img/v4.4/dogs/monkey.webp b/site/static/img/v4.4/dogs/monkey.webp new file mode 100644 index 00000000..0a383707 Binary files /dev/null and b/site/static/img/v4.4/dogs/monkey.webp differ diff --git a/site/static/img/v4.4/dogs/penny.webp b/site/static/img/v4.4/dogs/penny.webp new file mode 100644 index 00000000..7f7dc546 Binary files /dev/null and b/site/static/img/v4.4/dogs/penny.webp differ diff --git a/site/static/img/v4.4/dogs/tucker.png b/site/static/img/v4.4/dogs/tucker.png new file mode 100644 index 00000000..c2b11b99 Binary files /dev/null and b/site/static/img/v4.4/dogs/tucker.png differ diff --git a/docs/getting-started/images/harperstack.jpg b/site/static/img/v4.4/harperstack.jpg similarity index 100% rename from docs/getting-started/images/harperstack.jpg rename to site/static/img/v4.4/harperstack.jpg diff --git a/site/static/img/v4.4/reference/HarperDB-3.0-Storage-Algorithm.png.webp b/site/static/img/v4.4/reference/HarperDB-3.0-Storage-Algorithm.png.webp new file mode 100644 index 00000000..28118e00 Binary files /dev/null and b/site/static/img/v4.4/reference/HarperDB-3.0-Storage-Algorithm.png.webp differ diff --git a/site/static/img/v4.4/reference/dynamic_schema_2_create_table.png.webp b/site/static/img/v4.4/reference/dynamic_schema_2_create_table.png.webp new file mode 100644 index 00000000..b3cdee75 Binary files /dev/null and b/site/static/img/v4.4/reference/dynamic_schema_2_create_table.png.webp differ diff --git a/site/static/img/v4.4/reference/dynamic_schema_3_insert_record.png.webp b/site/static/img/v4.4/reference/dynamic_schema_3_insert_record.png.webp new file mode 100644 index 00000000..4f79c0f4 Binary files /dev/null and b/site/static/img/v4.4/reference/dynamic_schema_3_insert_record.png.webp differ diff --git a/site/static/img/v4.4/reference/dynamic_schema_4_insert_additional_record.png.webp b/site/static/img/v4.4/reference/dynamic_schema_4_insert_additional_record.png.webp new file mode 100644 index 00000000..2caa7e75 Binary files /dev/null and b/site/static/img/v4.4/reference/dynamic_schema_4_insert_additional_record.png.webp differ diff --git a/site/static/img/v4.4/reference/dynamic_schema_5_update_existing_record.png.webp b/site/static/img/v4.4/reference/dynamic_schema_5_update_existing_record.png.webp new file mode 100644 index 00000000..041a71d7 Binary files /dev/null and b/site/static/img/v4.4/reference/dynamic_schema_5_update_existing_record.png.webp differ diff --git a/site/static/img/v4.4/reference/dynamic_schema_6_query_table_with_sql.png.webp b/site/static/img/v4.4/reference/dynamic_schema_6_query_table_with_sql.png.webp new file mode 100644 index 00000000..95dac39c Binary files /dev/null and b/site/static/img/v4.4/reference/dynamic_schema_6_query_table_with_sql.png.webp differ diff --git a/site/static/img/v4.5/ave-age-per-owner-ex.png b/site/static/img/v4.5/ave-age-per-owner-ex.png new file mode 100644 index 00000000..8e39d22c Binary files /dev/null and b/site/static/img/v4.5/ave-age-per-owner-ex.png differ diff --git a/site/static/img/v4.5/clustering/figure1.png b/site/static/img/v4.5/clustering/figure1.png new 
file mode 100644 index 00000000..00c64580 Binary files /dev/null and b/site/static/img/v4.5/clustering/figure1.png differ diff --git a/site/static/img/v4.5/clustering/figure2.png b/site/static/img/v4.5/clustering/figure2.png new file mode 100644 index 00000000..688a3ba0 Binary files /dev/null and b/site/static/img/v4.5/clustering/figure2.png differ diff --git a/site/static/img/v4.5/clustering/figure3.png b/site/static/img/v4.5/clustering/figure3.png new file mode 100644 index 00000000..b21712e9 Binary files /dev/null and b/site/static/img/v4.5/clustering/figure3.png differ diff --git a/site/static/img/v4.5/clustering/figure4.png b/site/static/img/v4.5/clustering/figure4.png new file mode 100644 index 00000000..f578f632 Binary files /dev/null and b/site/static/img/v4.5/clustering/figure4.png differ diff --git a/site/static/img/v4.5/clustering/figure5.png b/site/static/img/v4.5/clustering/figure5.png new file mode 100644 index 00000000..f19e4de0 Binary files /dev/null and b/site/static/img/v4.5/clustering/figure5.png differ diff --git a/site/static/img/v4.5/clustering/figure6.png b/site/static/img/v4.5/clustering/figure6.png new file mode 100644 index 00000000..eff93613 Binary files /dev/null and b/site/static/img/v4.5/clustering/figure6.png differ diff --git a/site/static/img/v4.5/dogs/alby.webp b/site/static/img/v4.5/dogs/alby.webp new file mode 100644 index 00000000..bf73645a Binary files /dev/null and b/site/static/img/v4.5/dogs/alby.webp differ diff --git a/site/static/img/v4.5/dogs/monkey.webp b/site/static/img/v4.5/dogs/monkey.webp new file mode 100644 index 00000000..0a383707 Binary files /dev/null and b/site/static/img/v4.5/dogs/monkey.webp differ diff --git a/site/static/img/v4.5/dogs/penny.webp b/site/static/img/v4.5/dogs/penny.webp new file mode 100644 index 00000000..7f7dc546 Binary files /dev/null and b/site/static/img/v4.5/dogs/penny.webp differ diff --git a/site/static/img/v4.5/dogs/tucker.png b/site/static/img/v4.5/dogs/tucker.png new file mode 100644 index 00000000..c2b11b99 Binary files /dev/null and b/site/static/img/v4.5/dogs/tucker.png differ diff --git a/site/static/img/v4.5/harperstack.jpg b/site/static/img/v4.5/harperstack.jpg new file mode 100644 index 00000000..4fb155c2 Binary files /dev/null and b/site/static/img/v4.5/harperstack.jpg differ diff --git a/site/static/img/v4.5/reference/HarperDB-3.0-Storage-Algorithm.png.webp b/site/static/img/v4.5/reference/HarperDB-3.0-Storage-Algorithm.png.webp new file mode 100644 index 00000000..28118e00 Binary files /dev/null and b/site/static/img/v4.5/reference/HarperDB-3.0-Storage-Algorithm.png.webp differ diff --git a/site/static/img/v4.5/reference/dynamic_schema_2_create_table.png.webp b/site/static/img/v4.5/reference/dynamic_schema_2_create_table.png.webp new file mode 100644 index 00000000..b3cdee75 Binary files /dev/null and b/site/static/img/v4.5/reference/dynamic_schema_2_create_table.png.webp differ diff --git a/site/static/img/v4.5/reference/dynamic_schema_3_insert_record.png.webp b/site/static/img/v4.5/reference/dynamic_schema_3_insert_record.png.webp new file mode 100644 index 00000000..4f79c0f4 Binary files /dev/null and b/site/static/img/v4.5/reference/dynamic_schema_3_insert_record.png.webp differ diff --git a/site/static/img/v4.5/reference/dynamic_schema_4_insert_additional_record.png.webp b/site/static/img/v4.5/reference/dynamic_schema_4_insert_additional_record.png.webp new file mode 100644 index 00000000..2caa7e75 Binary files /dev/null and 
b/site/static/img/v4.5/reference/dynamic_schema_4_insert_additional_record.png.webp differ diff --git a/site/static/img/v4.5/reference/dynamic_schema_5_update_existing_record.png.webp b/site/static/img/v4.5/reference/dynamic_schema_5_update_existing_record.png.webp new file mode 100644 index 00000000..041a71d7 Binary files /dev/null and b/site/static/img/v4.5/reference/dynamic_schema_5_update_existing_record.png.webp differ diff --git a/site/static/img/v4.5/reference/dynamic_schema_6_query_table_with_sql.png.webp b/site/static/img/v4.5/reference/dynamic_schema_6_query_table_with_sql.png.webp new file mode 100644 index 00000000..95dac39c Binary files /dev/null and b/site/static/img/v4.5/reference/dynamic_schema_6_query_table_with_sql.png.webp differ diff --git a/site/static/img/v4.6/ave-age-per-owner-ex.png b/site/static/img/v4.6/ave-age-per-owner-ex.png new file mode 100644 index 00000000..8e39d22c Binary files /dev/null and b/site/static/img/v4.6/ave-age-per-owner-ex.png differ diff --git a/site/static/img/v4.6/clustering/figure1.png b/site/static/img/v4.6/clustering/figure1.png new file mode 100644 index 00000000..00c64580 Binary files /dev/null and b/site/static/img/v4.6/clustering/figure1.png differ diff --git a/site/static/img/v4.6/clustering/figure2.png b/site/static/img/v4.6/clustering/figure2.png new file mode 100644 index 00000000..688a3ba0 Binary files /dev/null and b/site/static/img/v4.6/clustering/figure2.png differ diff --git a/site/static/img/v4.6/clustering/figure3.png b/site/static/img/v4.6/clustering/figure3.png new file mode 100644 index 00000000..b21712e9 Binary files /dev/null and b/site/static/img/v4.6/clustering/figure3.png differ diff --git a/site/static/img/v4.6/clustering/figure4.png b/site/static/img/v4.6/clustering/figure4.png new file mode 100644 index 00000000..f578f632 Binary files /dev/null and b/site/static/img/v4.6/clustering/figure4.png differ diff --git a/site/static/img/v4.6/clustering/figure5.png b/site/static/img/v4.6/clustering/figure5.png new file mode 100644 index 00000000..f19e4de0 Binary files /dev/null and b/site/static/img/v4.6/clustering/figure5.png differ diff --git a/site/static/img/v4.6/clustering/figure6.png b/site/static/img/v4.6/clustering/figure6.png new file mode 100644 index 00000000..eff93613 Binary files /dev/null and b/site/static/img/v4.6/clustering/figure6.png differ diff --git a/site/static/img/v4.6/dogs/alby.webp b/site/static/img/v4.6/dogs/alby.webp new file mode 100644 index 00000000..bf73645a Binary files /dev/null and b/site/static/img/v4.6/dogs/alby.webp differ diff --git a/site/static/img/v4.6/dogs/monkey.webp b/site/static/img/v4.6/dogs/monkey.webp new file mode 100644 index 00000000..0a383707 Binary files /dev/null and b/site/static/img/v4.6/dogs/monkey.webp differ diff --git a/site/static/img/v4.6/dogs/penny.webp b/site/static/img/v4.6/dogs/penny.webp new file mode 100644 index 00000000..7f7dc546 Binary files /dev/null and b/site/static/img/v4.6/dogs/penny.webp differ diff --git a/site/static/img/v4.6/dogs/tucker.png b/site/static/img/v4.6/dogs/tucker.png new file mode 100644 index 00000000..c2b11b99 Binary files /dev/null and b/site/static/img/v4.6/dogs/tucker.png differ diff --git a/site/static/img/v4.6/harperstack.jpg b/site/static/img/v4.6/harperstack.jpg new file mode 100644 index 00000000..4fb155c2 Binary files /dev/null and b/site/static/img/v4.6/harperstack.jpg differ diff --git a/site/static/img/v4.6/reference/HarperDB-3.0-Storage-Algorithm.png.webp 
b/site/static/img/v4.6/reference/HarperDB-3.0-Storage-Algorithm.png.webp new file mode 100644 index 00000000..28118e00 Binary files /dev/null and b/site/static/img/v4.6/reference/HarperDB-3.0-Storage-Algorithm.png.webp differ diff --git a/site/static/img/v4.6/reference/dynamic_schema_2_create_table.png.webp b/site/static/img/v4.6/reference/dynamic_schema_2_create_table.png.webp new file mode 100644 index 00000000..b3cdee75 Binary files /dev/null and b/site/static/img/v4.6/reference/dynamic_schema_2_create_table.png.webp differ diff --git a/site/static/img/v4.6/reference/dynamic_schema_3_insert_record.png.webp b/site/static/img/v4.6/reference/dynamic_schema_3_insert_record.png.webp new file mode 100644 index 00000000..4f79c0f4 Binary files /dev/null and b/site/static/img/v4.6/reference/dynamic_schema_3_insert_record.png.webp differ diff --git a/site/static/img/v4.6/reference/dynamic_schema_4_insert_additional_record.png.webp b/site/static/img/v4.6/reference/dynamic_schema_4_insert_additional_record.png.webp new file mode 100644 index 00000000..2caa7e75 Binary files /dev/null and b/site/static/img/v4.6/reference/dynamic_schema_4_insert_additional_record.png.webp differ diff --git a/site/static/img/v4.6/reference/dynamic_schema_5_update_existing_record.png.webp b/site/static/img/v4.6/reference/dynamic_schema_5_update_existing_record.png.webp new file mode 100644 index 00000000..041a71d7 Binary files /dev/null and b/site/static/img/v4.6/reference/dynamic_schema_5_update_existing_record.png.webp differ diff --git a/site/static/img/v4.6/reference/dynamic_schema_6_query_table_with_sql.png.webp b/site/static/img/v4.6/reference/dynamic_schema_6_query_table_with_sql.png.webp new file mode 100644 index 00000000..95dac39c Binary files /dev/null and b/site/static/img/v4.6/reference/dynamic_schema_6_query_table_with_sql.png.webp differ diff --git a/site/static/js/reo.js b/site/static/js/reo.js new file mode 100644 index 00000000..dd419468 --- /dev/null +++ b/site/static/js/reo.js @@ -0,0 +1 @@ +!function(){var e,t,n;e="6565c3e84c377ad",t=function(){Reo.init({clientID:"6565c3e84c377ad"})},(n=document.createElement("script")).src="https://static.reo.dev/"+e+"/reo.js",n.async=!0,n.onload=t,document.head.appendChild(n)}(); diff --git a/site/tsconfig.json b/site/tsconfig.json new file mode 100644 index 00000000..920d7a65 --- /dev/null +++ b/site/tsconfig.json @@ -0,0 +1,8 @@ +{ + // This file is not used in compilation. It is here just for a nice editor experience. + "extends": "@docusaurus/tsconfig", + "compilerOptions": { + "baseUrl": "." + }, + "exclude": [".docusaurus", "build"] +} diff --git a/site/versioned_docs/version-4.1/add-ons-and-sdks/google-data-studio.md b/site/versioned_docs/version-4.1/add-ons-and-sdks/google-data-studio.md new file mode 100644 index 00000000..6c2c0b36 --- /dev/null +++ b/site/versioned_docs/version-4.1/add-ons-and-sdks/google-data-studio.md @@ -0,0 +1,37 @@ +--- +title: Google Data Studio +--- + +# Google Data Studio + +[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The HarperDB Google Data Studio connector seamlessly integrates your HarperDB data with Google Data Studio so you can build custom, real-time data visualizations. + +The HarperDB Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/). 
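For reference while following the setup steps below: the Basic Auth key the connector asks for is simply the Base64 encoding of `username:password`. As a sketch, for the hypothetical credentials `hdb_admin:password`, the full header would look like the following, and the connector wants only the part after the word "Basic":

```
Authorization: Basic aGRiX2FkbWluOnBhc3N3b3Jk
```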
+ +## Requirements + +The HarperDB database must be accessible through the Internet in order for Google Data Studio servers to access it. The database may be hosted by you or via HarperDB Cloud. + +## Get Started + +Get started by selecting the HarperDB connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create). + +1. Log in to https://datastudio.google.com/. +1. Add a new Data Source using the HarperDB connector. The current release version can be added as a data source by following this link: [HarperDB Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60\_VJeO3BhHJPvdd20-S76g). +1. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database). +1. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it. +1. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer. +1. Check the box for “Allow Bad Certs” if your HarperDB instance does not have a valid SSL certificate. HarperDB Cloud always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using HarperDB Cloud or another instance you know should always have valid SSL certificates, do not check this box. +1. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button. + * Table will ask you for a Schema and a Table to return all fields of using `SELECT *`. + * SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together, and use HarperDB-specific SQL functions, along with all the usual power of SQL. +1. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue. +1. If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as create new fields from the report view that do calculations on other fields. + +## Considerations + +* Both Postman and the [HarperDB Studio](../harperdb-studio/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector’s user. + * You may sign out of your current user by going to the instances tab in HarperDB Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab. +* It’s highly recommended that you create a read-only user role in HarperDB Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it. 
+* The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set. +* _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake\_case gets around this. diff --git a/site/versioned_docs/version-4.1/add-ons-and-sdks/index.md b/site/versioned_docs/version-4.1/add-ons-and-sdks/index.md new file mode 100644 index 00000000..db3eca55 --- /dev/null +++ b/site/versioned_docs/version-4.1/add-ons-and-sdks/index.md @@ -0,0 +1,7 @@ +--- +title: Add-ons & SDKs +--- + +# Add-ons & SDKs + +All HarperDB Add-Ons and SDKs can be found in the [HarperDB Marketplace](../harperdb-studio/resources#harperdb-marketplace) located in the [HarperDB Studio](../harperdb-studio/resources). \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/audit-logging.md b/site/versioned_docs/version-4.1/audit-logging.md new file mode 100644 index 00000000..86b2d068 --- /dev/null +++ b/site/versioned_docs/version-4.1/audit-logging.md @@ -0,0 +1,130 @@ +--- +title: Audit Logging +--- + +## Audit log + +The audit log uses a standard HarperDB table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. + +The audit log is disabled by default. To use the audit log, set `logging.auditLog` to true in the config file, `harperdb-config.yaml`. Then restart HarperDB for the changes to take effect. + +## Audit Log Operations + +### read_audit_log + +The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [HarperDB API documentation](https://api.harperdb.io/). + +**Search by Timestamp** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558 + ] +} +``` + +There are three possible outcomes when searching by timestamp: +* `"search_values": []` - All records returned for the specified table +* `"search_values": [1660585740558]` - All records after the provided timestamp +* `"search_values": [1660585740558, 1760585759710]` - All records between the two provided timestamps + +--- + +**Search by Username** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +The above example will return all records whose `username` is "admin." + +--- + +**Search by Primary Key** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +The above example will return all records whose primary key (`hash_value`) is 318. +___ + +### read_audit_log Response + +The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation also gives you the `original_records`. 
+ +```json +{ + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [ + 1, + 2 + ], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] +} +``` +### delete_audit_logs_before + +Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/clustering/certificate-management.md b/site/versioned_docs/version-4.1/clustering/certificate-management.md new file mode 100644 index 00000000..3238a2c5 --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/certificate-management.md @@ -0,0 +1,64 @@ +--- +title: Certificate Management +--- + +# Certificate Management + +## Development + +Out of the box HarperDB generates certificates that are used when HarperDB nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the HarperDB node, the following settings (see the full [configuration file](../configuration) docs for more details) are defaulted & recommended for ease of development: + +``` +clustering: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem + insecure: true + verify: true +``` + +The certificates that HarperDB generates are stored in your `/keys/`. + +`insecure` is set to `true` to accept the certificate CN mismatch due to development certificates. + +`verify` is set to `true` to enable mutual TLS between the nodes. + +## Production + +In a production environment, we recommend using your own certificate authority (CA), or a public CA such as LetsEncrypt to generate certs for your HarperDB cluster. This will let you generate certificates with CNs that match the FQDN of your nodes. + +Once you generate new certificates, to make HarperDB start using them you can either replace the generated files with your own, or update the configuration to point to your new certificates, and then restart HarperDB. + +Since these new certificates can be issued with correct CNs, you should set `insecure` to `false` so that nodes will do full validation of the certificates of the other nodes. + +### Certificate Requirements + +* Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication` as these certificates will be used to accept connections from other HarperDB nodes and to make requests to other HarperDB nodes. 
Example: + +``` +X509v3 Key Usage: critical + Digital Signature, Key Encipherment +X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication +``` + +* If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file. +* If your certificates expire you will need a way to issue new certificates to the nodes and then restart HarperDB. If you are using a public CA such as LetsEncrypt, a tool like `certbot` can be used to renew certificates. + +### Certificate Troubleshooting +If you are having TLS issues with clustering, use the following steps to verify that your certificates are valid. + +1. Make sure certificates can be parsed and that you can view the contents: +``` +openssl x509 -in <certificate>.pem -noout -text +``` +1. Make sure the certificate validates with the CA: +``` +openssl verify -CAfile <ca>.pem <certificate>.pem +``` +1. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match: +``` +openssl rsa -modulus -noout -in <privateKey>.pem | openssl md5 +openssl x509 -modulus -noout -in <certificate>.pem | openssl md5 +``` diff --git a/site/versioned_docs/version-4.1/clustering/creating-a-cluster-user.md b/site/versioned_docs/version-4.1/clustering/creating-a-cluster-user.md new file mode 100644 index 00000000..3edecd29 --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/creating-a-cluster-user.md @@ -0,0 +1,59 @@ +--- +title: Creating a Cluster User +--- + +# Creating a Cluster User + +Inter-node authentication takes place via HarperDB users. There is a special role type called `cluster_user` that exists by default and limits the user to only clustering functionality. + +A `cluster_user` must be created and added to the `harperdb-config.yaml` file for clustering to be enabled. + +All nodes that are intended to be clustered together need to share the same `cluster_user` credentials (i.e. username and password). + +There are multiple ways a `cluster_user` can be created: + +1. Through the operations API by calling `add_user` + +```json +{ + "operation": "add_user", + "role": "cluster_user", + "username": "cluster_account", + "password": "letsCluster123!", + "active": true +} +``` + +When using the API to create a cluster user, the `harperdb-config.yaml` file must be updated with the username of the new cluster user. + +This can be done through the API by calling `set_configuration` or by editing the `harperdb-config.yaml` file. + +```json +{ + "operation": "set_configuration", + "clustering_user": "cluster_account" +} +``` + +In the `harperdb-config.yaml` file, under the top-level `clustering` element, there will be a `user` element. Set this to the name of the cluster user. + +```yaml +clustering: + user: cluster_account +``` + +_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._ + +1. Upon installation using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file. + +_Note: Using command line or environment variables for setting the cluster user only works on install._ + +``` +harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123! +``` + +1. Upon installation using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file. 
+ +``` +CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123! harperdb install +``` diff --git a/site/versioned_docs/version-4.1/clustering/enabling-clustering.md b/site/versioned_docs/version-4.1/clustering/enabling-clustering.md new file mode 100644 index 00000000..6b563b19 --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/enabling-clustering.md @@ -0,0 +1,49 @@ +--- +title: Enabling Clustering +--- + +# Enabling Clustering + +Clustering does not run by default; it needs to be enabled. + +To enable clustering, the `clustering.enabled` configuration element in the `harperdb-config.yaml` file must be set to `true`. + +There are multiple ways to update this element: + +1. Directly editing the `harperdb-config.yaml` file and setting enabled to `true` + +```yaml +clustering: + enabled: true +``` + +_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._ + +1. Calling `set_configuration` through the operations API + +```json +{ + "operation": "set_configuration", + "clustering_enabled": true +} +``` + +_Note: When making any changes to HarperDB configuration, HarperDB must be restarted for the changes to take effect._ + +1. Using **command line variables**. + +``` +harperdb --CLUSTERING_ENABLED true +``` + +1. Using **environment variables**. + +``` +CLUSTERING_ENABLED=true +``` + +An efficient way to **install HarperDB**, **create the cluster user**, **set the node name** and **enable clustering** in one operation is to combine the steps using command line and/or environment variables. Here is an example using command line variables. + +``` +harperdb install --CLUSTERING_ENABLED true --CLUSTERING_NODENAME Node1 --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123! +``` diff --git a/site/versioned_docs/version-4.1/clustering/establishing-routes.md b/site/versioned_docs/version-4.1/clustering/establishing-routes.md new file mode 100644 index 00000000..e4ca2a6d --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/establishing-routes.md @@ -0,0 +1,73 @@ +--- +title: Establishing Routes +--- + +# Establishing Routes + +A route is a connection between two nodes. It is how the clustering network is established. + +Routes do not need to cross-connect all nodes in the cluster. You can select one leader node (or a few) that all other nodes connect to, chain nodes together, and so on. As long as there is one route connecting a node to the cluster, all other nodes should be able to reach that node. + +Using routes, the clustering servers create a mesh network between nodes. This mesh network ensures that if a node drops out, all other nodes can still communicate with each other. That said, we recommend designing your routing with failover in mind: rather than storing all your routes on one node, disperse them throughout the network. + +A simple example is a two-node topology: if Node1 adds a route connecting it to Node2, Node2 does not need to add a route to Node1. That one route configuration is all that’s needed to establish a bidirectional connection between the nodes. + +A route consists of a `port` and a `host`. + +`port` - the clustering port of the remote instance you are creating the connection with. This is going to be the `clustering.hubServer.cluster.network.port` in the HarperDB configuration on the node you are connecting with. + +`host` - the host of the remote instance you are creating the connection with. This can be an IP address or a URL. 
+ +Routes are set in the `harperdb-config.yaml` file using the `clustering.hubServer.cluster.network.routes` element, which expects an object array, where each object has two properties, `port` and `host`. + +```yaml +clustering: + hubServer: + cluster: + network: + routes: + - host: 3.62.184.22 + port: 9932 + - host: 3.735.184.8 + port: 9932 +``` + +![figure 1](/img/v4.1/clustering/figure1.png) + +This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes. + +There are multiple ways to set routes: + +1. Directly editing the `harperdb-config.yaml` file (refer to code snippet above). +1. Calling `cluster_set_routes` through the API. + +```json +{ + "operation": "cluster_set_routes", + "server": "hub", + "routes":[ {"host": "3.735.184.8", "port": 9932} ] +} +``` + +_Note: When making any changes to HarperDB configuration, HarperDB must be restarted for the changes to take effect._ + +1. From the command line. + +```bash +--CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]" +``` + +1. Using environment variables. + +```bash +CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}] +``` + +The API also has `cluster_get_routes` for getting all routes in the config and `cluster_delete_routes` for deleting routes. + +```json +{ + "operation": "cluster_delete_routes", + "routes":[ {"host": "3.735.184.8", "port": 9932} ] +} +``` diff --git a/site/versioned_docs/version-4.1/clustering/index.md b/site/versioned_docs/version-4.1/clustering/index.md new file mode 100644 index 00000000..7bde63a2 --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/index.md @@ -0,0 +1,40 @@ +--- +title: Clustering +--- + +# Clustering + +HarperDB clustering is the process of connecting multiple HarperDB databases together to create a database mesh network that enables users to define data replication patterns. + +HarperDB’s clustering engine replicates data between instances of HarperDB using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model based on the transaction’s recorded time and the timestamp on the record on the node. + +--- +### Common Use Case + +A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold: + +* The edge application should not be making outbound HTTP requests for security purposes. + +* There may not be a reliable network connection. + +* Not all sensor data will be sent to the cloud, either because of the unreliable network connection or because storing all of it is more trouble than it’s worth. + +* The edge node should be inaccessible from outside the firewall. + +* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings. 
+ + +HarperDB simplifies the architecture of such an application with its bi-directional, table-level replication: + +* The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds. + +* The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so. + +* When a threshold violation occurs, the application adds a record to the “alerts” table. + +* The application appends to that record an array of “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation. + +* The edge instance publishes the “alerts” table up to the cloud instance. + + +By letting HarperDB focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to HarperDB, you reduce the overall exposure of your application to outside forces. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/clustering/managing-subscriptions.md b/site/versioned_docs/version-4.1/clustering/managing-subscriptions.md new file mode 100644 index 00000000..a1f8c56e --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/managing-subscriptions.md @@ -0,0 +1,168 @@ +--- +title: Managing subscriptions +--- + +# Managing subscriptions + +Subscriptions can be added, updated, or removed through the API. + +_Note: The schema and tables in the subscription must exist on either the local or the remote node. Any schema and tables that do not exist on a given node (for example, the local node) will be created on that node automatically._ + +To add a single node and create one or more subscriptions, use `add_node`. + +```json +{ + "operation": "add_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": false, + "subscribe": true + }, + { + "schema": "dev", + "table": "chicken", + "publish": true, + "subscribe": true + } + ] +} +``` + +This is an example of adding Node2 to your local node. Subscriptions are created for two tables, dog and chicken. + +To update one or more subscriptions with a single node, use `update_node`. + +```json +{ + "operation": "update_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ] +} +``` + +This call will update the subscription with the dog table. Any other subscriptions with Node2 will not change. + +To add or update subscriptions with one or more nodes in one API call, use `configure_cluster`. + +```json +{ + "operation": "configure_cluster", + "connections": [ + { + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "chicken", + "publish": false, + "subscribe": true + }, + { + "schema": "prod", + "table": "dog", + "publish": true, + "subscribe": true + } + ] + }, + { + "node_name": "Node3", + "subscriptions": [ + { + "schema": "dev", + "table": "chicken", + "publish": true, + "subscribe": false + } + ] + } + ] +} +``` + +_Note: `configure_cluster` will override **any and all** existing subscriptions defined on the local node. This means that before going through the connections in the request and adding the subscriptions, it will first go through **all existing subscriptions the local node has** and remove them. 
To get all existing subscriptions, use `cluster_status`._ + +#### Start time + +There is an optional property called `start_time` that can be passed in the subscription. This property accepts an ISO-formatted UTC date. + +`start_time` can be used to set from what time you would like to source transactions from a table when creating or updating a subscription. + +```json +{ + "operation": "add_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": false, + "subscribe": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +This example will get all transactions on Node2’s dog table starting from `2022-09-02T20:06:35.993Z` and replicate them locally on the dog table. + +If no start time is passed, it defaults to the current time. + +_Note: start time utilizes clustering to back-source transactions. For this reason it can only source transactions that occurred while clustering was enabled._ + +#### Remove node + +To remove a node and all its subscriptions, use `remove_node`. + +```json +{ + "operation":"remove_node", + "node_name":"Node2" +} +``` + +#### Cluster status + +To get the status of all connected nodes and see their subscriptions, use `cluster_status`. + +```json +{ + "node_name": "Node1", + "is_enabled": true, + "connections": [ + { + "node_name": "Node2", + "status": "open", + "ports": { + "clustering": 9932, + "operations_api": 9925 + }, + "latency_ms": 65, + "uptime": "11m 19s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ], + "system_info": { + "hdb_version": "4.0.0", + "node_version": "16.17.1", + "platform": "linux" + } + } + ] +} +``` diff --git a/site/versioned_docs/version-4.1/clustering/naming-a-node.md b/site/versioned_docs/version-4.1/clustering/naming-a-node.md new file mode 100644 index 00000000..d1ebdfb1 --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/naming-a-node.md @@ -0,0 +1,45 @@ +--- +title: Naming a Node +--- + +# Naming a Node + +Node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster. + +The name cannot contain whitespace or any of the following characters: `.` (dot), `,` (comma), `*` (asterisk), or `>` (greater-than). + +The name is set in the `harperdb-config.yaml` file using the `clustering.nodeName` configuration element. + +_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._ + +There are multiple ways to update this element: + +1. Directly editing the `harperdb-config.yaml` file. + +```yaml +clustering: + nodeName: Node1 +``` + +_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._ + +1. Calling `set_configuration` through the operations API + +```json +{ + "operation": "set_configuration", + "clustering_nodeName":"Node1" +} +``` + +1. Using command line variables. + +``` +harperdb --CLUSTERING_NODENAME Node1 +``` + +1. Using environment variables. 
+ +``` +CLUSTERING_NODENAME=Node1 +``` diff --git a/site/versioned_docs/version-4.1/clustering/requirements-and-definitions.md b/site/versioned_docs/version-4.1/clustering/requirements-and-definitions.md new file mode 100644 index 00000000..1e2dd6af --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/requirements-and-definitions.md @@ -0,0 +1,11 @@ +--- +title: Requirements and Definitions +--- + +# Requirements and Definitions + +To create a cluster, you must have two or more nodes\* (aka instances) of HarperDB running. + +\*_A node is a single instance/installation of HarperDB. A node of HarperDB can operate independently with clustering on or off._ + +On the following pages we'll walk you through the steps required, in order, to set up a HarperDB cluster. diff --git a/site/versioned_docs/version-4.1/clustering/subscription-overview.md b/site/versioned_docs/version-4.1/clustering/subscription-overview.md new file mode 100644 index 00000000..76292f4a --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/subscription-overview.md @@ -0,0 +1,45 @@ +--- +title: Subscriptions +--- + +# Subscriptions + +A subscription defines how data should move between two nodes. Subscriptions are exclusively table-level and operate independently. They connect a table on one node to a table on another node; the subscription applies to the matching schema name and table name on both nodes. + +_Note: ‘local’ and ‘remote’ will often be referred to. In the context of these docs, ‘local’ is the node receiving the API request to create or update a subscription, and ‘remote’ is the other node named in the request, i.e. the node on the other end of the subscription._ + +A subscription consists of: + +`schema` - the name of the schema that the table you are creating the subscription for belongs to. + +`table` - the name of the table the subscription will apply to. + +`publish` - a boolean which determines if transactions on the local table should be replicated on the remote table. + +`subscribe` - a boolean which determines if transactions on the remote table should be replicated on the local table. + +#### Publish subscription + +![figure 2](/img/v4.1/clustering/figure2.png) + +This diagram is an example of a `publish` subscription from the perspective of Node1. + +The record with id 2 has been inserted in the dog table on Node1; after that insert completes, it is sent to Node2 and inserted in the dog table there. + +#### Subscribe subscription + +![figure 3](/img/v4.1/clustering/figure3.png) + +This diagram is an example of a `subscribe` subscription from the perspective of Node1. + +The record with id 3 has been inserted in the dog table on Node2; after that insert completes, it is sent to Node1 and inserted there. + +#### Subscribe and Publish + +![figure 4](/img/v4.1/clustering/figure4.png) + +This diagram shows both subscribe and publish, but publish is set to false. Because subscribe is true, the insert on Node2 is replicated on Node1; because publish is set to false, the insert on Node1 is _**not**_ replicated on Node2. + +![figure 5](/img/v4.1/clustering/figure5.png) + +This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1. 
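As a concrete sketch (the node, schema, and table names here are illustrative), the bidirectional subscription pictured in figure 5 could be created from Node1 with an `add_node` request like the following; `add_node` is covered in more detail on the Managing subscriptions page:

```json
{
  "operation": "add_node",
  "node_name": "Node2",
  "subscriptions": [
    {
      "schema": "dev",
      "table": "dog",
      "publish": true,
      "subscribe": true
    }
  ]
}
```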
diff --git a/site/versioned_docs/version-4.1/clustering/things-worth-knowing.md b/site/versioned_docs/version-4.1/clustering/things-worth-knowing.md new file mode 100644 index 00000000..cb01b8b8 --- /dev/null +++ b/site/versioned_docs/version-4.1/clustering/things-worth-knowing.md @@ -0,0 +1,43 @@ +--- +title: Things worth Knowing +--- + +# Things worth Knowing + +Additional information that will help you define your clustering topology. + +*** + +### Transactions + +Transactions that are replicated across the cluster are: + +* Insert +* Update +* Upsert +* Delete +* Bulk loads + * CSV data load + * CSV file load + * CSV URL load + * Import from S3 + +When adding or updating a node, any schemas and tables in the subscription that don’t exist on the remote node will be automatically created. + +**Destructive schema operations do not replicate across a cluster**. Those operations include `drop_schema`, `drop_table`, and `drop_attribute`. If the desired outcome is to drop schema information from any nodes, the operation(s) will need to be run on each node independently. + +Users and roles are not replicated across the cluster. + +*** + +### Queueing + +HarperDB has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catchup routine is executed to ensure data that was missed, specific to the subscription, is sent/received as defined. + +*** + +### Topologies + +HarperDB clustering creates a mesh network between nodes, giving end users the ability to create an infinite number of topologies. Subscription topologies can be as simple or as complex as needed. + +![figure 6](/img/v4.1/clustering/figure6.png) diff --git a/site/versioned_docs/version-4.1/configuration.md b/site/versioned_docs/version-4.1/configuration.md new file mode 100644 index 00000000..2079c9fe --- /dev/null +++ b/site/versioned_docs/version-4.1/configuration.md @@ -0,0 +1,785 @@ +--- +title: Configuration File +--- + +# Configuration File + +HarperDB is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the operations API root directory (by default this is a directory named `hdb` located in the home directory of the current user). + +All available configuration will be populated by default in the config file on install, regardless of whether it is used. + +--- + +## Using the Configuration File and Naming Conventions + +The configuration elements in `harperdb-config.yaml` use camelCase: `operationsApi`. + +To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. HarperDB must be restarted for changes to take effect. + +Alternatively, configuration can be changed via environment and/or command line variables or via the API. To access lower level elements, use underscores to append parent/child elements (when used this way elements are case insensitive): + + - Environment variables: `OPERATIONSAPI_NETWORK_PORT=9925` + - Command line variables: `--OPERATIONSAPI_NETWORK_PORT 9925` + - Calling `set_configuration` through the API: `operationsApi_network_port: 9925` + +--- + +## Configuration Options + +### `clustering` + +The `clustering` section configures the clustering engine, which is used to replicate data between instances of HarperDB. + +Clustering offers many configuration options; however, in most cases the only options you will need to pay attention to are: + +- `clustering.enabled` Enable the clustering processes. 
+- `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes. +- `clustering.hubServer.cluster.network.routes` The connections to other instances. +- `clustering.nodeName` The name of your node; it must be unique within the cluster. +- `clustering.user` The name of the user credentials used for inter-node authentication. + + +`enabled` - _Type_: boolean; _Default_: false + +Enable clustering. + +_Note: If you enable clustering but do not create and add a cluster user, you will get a validation error. See the `user` description below for how to add a cluster user._ + +```yaml +clustering: + enabled: true +``` + +`clustering.hubServer.cluster` + +Clustering’s `hubServer` facilitates the HarperDB mesh network and discovery service. + +```yaml +clustering: + hubServer: + cluster: + name: harperdb + network: + port: 9932 + routes: + - host: 3.62.184.22 + port: 9932 + - host: 3.735.184.8 + port: 9932 +``` + +`name` - _Type_: string; _Default_: harperdb + +The name of your cluster. This name needs to be consistent for all other nodes intended to be meshed in the same network. + +
+ +`port` - _Type_: integer; _Default_: 9932 + +The port the hub server uses to accept cluster connections. + +`routes` - _Type_: array; _Default_: null + +An object array that represents the host and port this server will cluster to. Each object must have two properties, `port` and `host`. Multiple entries can be added to create network resiliency in the event one server is unavailable. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. +
+ +
+ +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance. + +
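Following the underscore convention described in "Using the Configuration File and Naming Conventions" above, a nested value like this port can presumably also be changed through the operations API; a sketch (the port value shown is illustrative):

```json
{
  "operation": "set_configuration",
  "clustering_hubServer_cluster_network_port": 9932
}
```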
+ +`clustering.hubServer.leafNodes` + +```yaml +clustering: + hubServer: + leafNodes: + network: + port: 9931 +``` + +`port` - _Type_: integer; _Default_: 9931 + +The port the hub server uses to accept leaf server connections. + +`clustering.hubServer.network` + +```yaml +clustering: + hubServer: + network: + port: 9930 +``` + +`port` - _Type_: integer; _Default_: 9930 + +Use this port to connect a client to the hub server, for example using the NATS SDK to interact with the server. + +`clustering.leafServer` + +Manages streams; streams are ‘message stores’ that store table transactions. + +```yaml +clustering: + leafServer: + network: + port: 9940 + routes: + - host: 3.62.184.22 + port: 9931 + - host: node3.example.com + port: 9931 + streams: + maxAge: 3600 + maxBytes: 10000000 + maxMsgs: 500 + path: /user/hdb/clustering/leaf +``` + +`port` - _Type_: integer; _Default_: 9940 + +Use this port to connect a client to the leaf server, for example using the NATS SDK to interact with the server. + +`routes` - _Type_: array; _Default_: null + +An object array that represents the host and port the leaf node will directly connect with. Each object must have two properties, `port` and `host`. Unlike the hub server, the leaf server will establish connections to all listed hosts. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +
+ +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.leafNodes.network.port` on the remote instance. +
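As with hub server routes, leaf server routes can be managed through the `cluster_set_routes` and `cluster_delete_routes` operations mentioned above. A sketch targeting the leaf server, assuming the `server` parameter shown earlier for the hub also accepts `leaf` (host and port values are illustrative):

```json
{
  "operation": "cluster_set_routes",
  "server": "leaf",
  "routes": [{ "host": "node3.example.com", "port": 9931 }]
}
```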
+ +
+ +`clustering.leafServer.streams` + +`maxAge` - _Type_: integer; _Default_: null + +The maximum age of any messages in the stream, expressed in seconds. + +`maxBytes` - _Type_: integer; _Default_: null + +The maximum size of the stream in bytes. Oldest messages are removed if the stream exceeds this size. + +`maxMsgs` - _Type_: integer; _Default_: null + +How many messages may be in a stream. Oldest messages are removed if the stream exceeds this number. + +`path` - _Type_: string; _Default_: <ROOTPATH>/clustering/leaf + +The directory where all the streams are kept. + +--- +`logLevel` - _Type_: string; _Default_: error + +Control the verbosity of clustering logs. + +```yaml +clustering: + logLevel: error +``` + +There is a log level hierarchy, in order: `trace`, `debug`, `info`, `warn`, and `error`. When the level is set to `trace`, logs will be created for all possible levels, whereas if the level is set to `warn`, the only entries logged will be `warn` and `error`. The default value is `error`. + + +`nodeName` - _Type_: string; _Default_: null + +The name of this node in your HarperDB cluster topology. This value must be unique among the cluster node names. + +_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._ + +```yaml +clustering: + nodeName: great_node +``` + +`tls` + +Transport Layer Security default values are automatically generated on install. + +```yaml +clustering: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem + insecure: true + verify: true +``` + +`certificate` - _Type_: string; _Default_: <ROOTPATH>/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: <ROOTPATH>/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: <ROOTPATH>/keys/privateKey.pem + +Path to the private key file. + +`insecure` - _Type_: boolean; _Default_: true + +When true, will skip certificate verification. For use only with self-signed certs. + +`republishMessages` - _Type_: boolean; _Default_: true + +When true, all transactions that are received from other nodes are republished to this node's stream. When subscriptions are not fully connected between all nodes, this ensures that messages are routed to all nodes through intermediate nodes. This also ensures that all writes, whether local or remote, are written to the NATS transaction log. However, there is additional overhead with republishing, and setting this to false can provide better data replication performance. When false, you need to ensure all subscriptions are fully connected from every node to every other node, and be aware that the NATS transaction log will only consist of local writes. + +`verify` - _Type_: boolean; _Default_: true + +When true, the hub server will verify the client certificate using the CA certificate. + +--- + +`user` - _Type_: string; _Default_: null + +The username given to the `cluster_user`. All instances in a cluster must use the same clustering user credentials (matching username and password). + +Inter-node authentication takes place via a special HarperDB user role type called `cluster_user`. 
+ +The user can be created either through the API using an `add_user` request with the role set to `cluster_user`, at install time using the environment variables `CLUSTERING_USER=cluster_person` and `CLUSTERING_PASSWORD=pass123!`, or with the CLI arguments `harperdb --CLUSTERING_USER cluster_person --CLUSTERING_PASSWORD pass123!`. + +```yaml +clustering: + user: cluster_person +``` + +--- + + +### `customFunctions` + +The `customFunctions` section configures HarperDB Custom Functions. + +`enabled` - _Type_: boolean; _Default_: true + +Whether or not to enable the Custom Functions server. + +```yaml +customFunctions: + enabled: true +``` + +`customFunctions.network` + +```yaml +customFunctions: + network: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + https: false + keepAliveTimeout: 5000 + port: 9926 + timeout: 120000 +``` + +
+ +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of domains allowed to make CORS requests. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limits the amount of time the parser will wait to receive the complete HTTP headers. + +`https` - _Type_: boolean; _Default_: false + +Enables HTTPS on the Custom Functions API. This requires a valid certificate and key. If `false`, Custom Functions will run using standard HTTP. + +`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9926 + +The port used to access the Custom Functions server. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will time out. +
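+ +For example, to permit cross-origin requests from only a specific set of domains, the access list might be populated as in this sketch (the domains are hypothetical placeholders): + +```yaml +customFunctions: + network: + cors: true + corsAccessList: + - harperdb.io + - example.com +``` +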
+ +`nodeEnv` - _Type_: string; _Default_: production + +Allows you to specify the node environment in which the application will run. + +```yaml +customFunctions: + nodeEnv: production +``` + +- `production`: native Node logging is kept to a minimum; more caching to optimize performance. This is the default value. +- `development`: more native Node logging; less caching. + +`root` - _Type_: string; _Default_: <ROOTPATH>/custom_functions + +The path to the folder containing Custom Function files. + +```yaml +customFunctions: + root: ~/hdb/custom_functions +``` + +`tls` + +Transport Layer Security. + +```yaml +customFunctions: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: <ROOTPATH>/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: <ROOTPATH>/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: <ROOTPATH>/keys/privateKey.pem + +Path to the private key file. + + +--- + + +### `ipc` + +The `ipc` section configures the HarperDB Inter-Process Communication interface. + +```yaml +ipc: + network: + port: 9383 +``` + +`port` - _Type_: integer; _Default_: 9383 + +The port the IPC server runs on. The default is `9383`. + + +--- + + +### `localStudio` + +The `localStudio` section configures the local HarperDB Studio, a simplified GUI for HarperDB hosted on the server. A more comprehensive GUI is hosted by HarperDB at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or HarperDB Studio is made directly from your browser to the instance. + +`enabled` - _Type_: boolean; _Default_: false + +Whether or not to enable the local Studio. + +```yaml +localStudio: + enabled: false +``` + +--- + + +### `logging` + +The `logging` section configures HarperDB logging across all HarperDB functionality. HarperDB leverages pm2 for logging. Each process group gets its own log file, which is located in `logging.root`. + +`auditLog` - _Type_: boolean; _Default_: false + +Enables table transaction logging. + +```yaml +logging: + auditLog: false +``` + +To access the audit logs, use the API operation `read_audit_log`. It will provide a history of the data, including original records and changes made, in a specified table. +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +`file` - _Type_: boolean; _Default_: true + +Defines whether or not to log to a file. + +```yaml +logging: + file: true +``` + +`level` - _Type_: string; _Default_: error + +Controls the verbosity of logs. + +```yaml +logging: + level: error +``` + +The log levels form a hierarchy, in order: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. When the level is set to `trace`, logs will be created for all possible levels. If the level is set to `fatal`, the only entries logged will be `fatal` and `notify`. The default value is `error`. + +`root` - _Type_: string; _Default_: <ROOTPATH>/log + +The path where the log files will be written. + +```yaml +logging: + root: ~/hdb/log +``` + +`rotation` + +Rotation provides the ability for a user to systematically rotate and archive the `hdb.log` file. To enable rotation, `interval` and/or `maxSize` must be set. + +**_Note:_** `interval` and `maxSize` are approximate only. It is possible that the log file will exceed these values slightly before it is rotated.
+ +```yaml +logging: + rotation: + enabled: true + compress: false + interval: 1D + maxSize: 100K + path: /user/hdb/log +``` +
+ +`enabled` - _Type_: boolean; _Default_: false + +Enables logging rotation. + +`compress` - _Type_: boolean; _Default_: false + +Enables compression via gzip when logs are rotated. + +`interval` - _Type_: string; _Default_: null + +The time that should elapse between rotations. Acceptable units are D(ays), H(ours) or M(inutes). + +`maxSize` - _Type_: string; _Default_: null + +The maximum size the log file can reach before it is rotated. Must use units M(egabyte), G(igabyte), or K(ilobyte). + +`path` - _Type_: string; _Default_: <ROOTPATH>/log + +Where to store the rotated log file. File naming convention is `HDB-YYYY-MM-DDT-HH-MM-SSSZ.log`. + +
+ + +`stdStreams` - _Type_: boolean; _Default_: false + +Write HarperDB logs to the standard output and error streams. The `operationsApi.foreground` flag must be enabled in order to receive the stream. + +```yaml +logging: + stdStreams: false +``` + +--- + + +### `operationsApi` + +The `operationsApi` section configures the HarperDB Operations API. + +`authentication` + +```yaml +operationsApi: + authentication: + operationTokenTimeout: 1d + refreshTokenTimeout: 30d +``` + +
+ +`operationTokenTimeout` - _Type_: string; _Default_: 1d + +Defines the length of time an operation token remains valid before it expires. Example values: https://github.com/vercel/ms. + +`refreshTokenTimeout` - _Type_: string; _Default_: 30d + +Defines the length of time a refresh token remains valid before it expires. Example values: https://github.com/vercel/ms. +
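+ +For context, these timeouts govern the token pair issued by the operations API; a pair is requested with the `create_authentication_tokens` operation. A minimal sketch, with placeholder credentials: + +```json +{ + "operation": "create_authentication_tokens", + "username": "HDB_ADMIN", + "password": "password" +} +``` +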
+ +`foreground` - _Type_: boolean; _Default_: false + +Determines whether or not HarperDB runs in the foreground. + +```yaml +operationsApi: + foreground: false +``` + +`network` + +```yaml +operationsApi: + network: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + https: false + keepAliveTimeout: 5000 + port: 9925 + timeout: 120000 +``` +
+ +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of domains allowed to make CORS requests. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limits the amount of time the parser will wait to receive the complete HTTP headers. + +`https` - _Type_: boolean; _Default_: false + +Enable HTTPS on the HarperDB operations endpoint. This requires a valid certificate and key. If `false`, HarperDB will run using standard HTTP. + +`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9925 + +The port the HarperDB operations API interface will listen on. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will time out. + +
+ +`nodeEnv` - _Type_: string; _Default_: production + +Allows you to specify the node environment in which the application will run. + +```yaml +operationsApi: + nodeEnv: production +``` + +- `production`: native Node logging is kept to a minimum; more caching to optimize performance. This is the default value. +- `development`: more native Node logging; less caching. + +`tls` + +This configures the Transport Layer Security for HTTPS support. + +```yaml +operationsApi: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: <ROOTPATH>/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: <ROOTPATH>/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: <ROOTPATH>/keys/privateKey.pem + +Path to the private key file. + +--- + +### `http` + +`threads` - _Type_: number; _Default_: One less than the number of logical cores/processors + +The `threads` option specifies the number of threads that will be used to service the HTTP requests for the operations API and custom functions. Generally, this should be close to the number of CPU logical cores/processors to ensure the CPU is fully utilized (a little less because HarperDB does have other threads at work), assuming HarperDB is the main service on a server. + +```yaml +http: + threads: 11 +``` + +`sessionAffinity` - _Type_: string; _Default_: null + +HarperDB is a multi-threaded server designed to scale to utilize many CPU cores with high concurrency. Session affinity can help improve the efficiency and fairness of thread utilization by routing multiple requests from the same client to the same thread. This provides a fairer method of request handling by keeping a single user contained to a single thread, can improve caching locality (multiple requests from a single user are more likely to access the same data), and can provide the ability to share information in-memory in user sessions. Enabling session affinity will cause subsequent requests from the same client to be routed to the same thread. + +To enable `sessionAffinity`, you need to specify how clients will be identified from the incoming requests. If you are using HarperDB to directly serve HTTP requests from users from different remote addresses, you can use a setting of `ip`. However, if you are using HarperDB behind a proxy server or application server, all the remote IP addresses will be the same and HarperDB will effectively only run on a single thread. Alternatively, you can specify a header to use for identification. If you are using basic authentication, you could use the "Authorization" header to route requests to threads by the user's credentials. If you have another header that uniquely identifies users/clients, you can use that as the value of `sessionAffinity`. But be careful to ensure that the value provides sufficient uniqueness and that requests are effectively distributed across all the threads, fully utilizing all your CPU cores. +```yaml +http: + sessionAffinity: ip +``` + +--- + +### `rootPath` + +`rootPath` - _Type_: string; _Default_: home directory of the current user + +The HarperDB database and applications/API/interface are decoupled from each other. The `rootPath` directory specifies where the HarperDB application persists data, config, logs, and Custom Functions.
+ +```yaml +rootPath: /Users/jonsnow/hdb +``` + +--- + +### `storage` + +`writeAsync` - _Type_: boolean; _Default_: false + +The `writeAsync` option turns off disk flushing/syncing, allowing for faster write operation throughput. However, this does not provide storage integrity guarantees, and if a server crashes, it is possible that there may be data loss requiring a restore from a backup or another node. + +```yaml +storage: + writeAsync: false +``` + +`caching` - _Type_: boolean; _Default_: true + +The `caching` option enables in-memory caching of records, providing faster access to frequently accessed objects. This can incur some extra overhead for situations where reads are extremely random and don't benefit from caching. + +```yaml +storage: + caching: true +``` + + +`compression` - _Type_: boolean; _Default_: false + +The `compression` option enables compression of records in the database. This can be helpful for very large databases in reducing storage requirements and potentially allowing more data to be cached. This uses the very fast LZ4 compression algorithm, but this still incurs extra costs for compressing and decompressing. + +```yaml +storage: + compression: false +``` + + +`noReadAhead` - _Type_: boolean; _Default_: true + +The `noReadAhead` option advises the operating system to not read ahead when reading from the database. This provides better memory utilization, except in situations where large records or frequent range queries are used. + +```yaml +storage: + noReadAhead: true +``` + + +`prefetchWrites` - _Type_: boolean; _Default_: true + +The `prefetchWrites` option loads data prior to write transactions. This should be enabled for databases that are larger than memory (although it can be faster to disable this for smaller databases). + +```yaml +storage: + prefetchWrites: true +``` + + +`path` - _Type_: string; _Default_: `<ROOTPATH>/schema` + +The `path` configuration sets where all database files should reside. + +```yaml +storage: + path: /users/harperdb/storage +``` + +**_Note:_** This configuration applies to all database files, which includes system tables that are used internally by HarperDB. For this reason, if you wish to use a non-default `path` value, you must move any existing schemas into your `path` location. Existing schemas will likely include the system schema, which can be found at `<ROOTPATH>/schema/system`. + +--- + +### `schemas` + +The `schemas` section is an optional configuration that can be used to define where database files should reside down to the table level. +

This configuration should be set before the schema and table have been created. +

The configuration will not create the directories in the path; these must be created by the user. +
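+ +For example, a target directory could be created ahead of time from a shell (the path is a hypothetical placeholder): + +```bash +mkdir -p /path/to/schema +``` +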
+ +To define where a schema and all its tables should reside, use the name of your schema and the `path` parameter. + +```yaml +schemas: + nameOfSchema: + path: /path/to/schema +``` + +To define where specific tables within a schema should reside, use the name of your schema, the `tables` parameter, the name of your table, and the `path` parameter. + +```yaml +schemas: + nameOfSchema: + tables: + nameOfTable: + path: /path/to/table +``` + +This same pattern can be used to define where the audit log database files should reside. To do this, use the `auditPath` parameter. + +```yaml +schemas: + nameOfSchema: + auditPath: /path/to/schema +``` +
+ +**Setting the schemas section through the command line, environment variables or API** + +When using command line variables, environment variables, or the API to configure the schemas section, a slightly different convention from the regular one should be used. To add one or more configurations, use a JSON object array. + +Using command line variables: +```bash +--SCHEMAS [{\"nameOfSchema\":{\"tables\":{\"nameOfTable\":{\"path\":\"\/path\/to\/table\"}}}}] +``` + +Using environment variables: +```bash +SCHEMAS=[{"nameOfSchema":{"tables":{"nameOfTable":{"path":"/path/to/table"}}}}] +``` + +Using the API: +```json +{ + "operation": "set_configuration", + "schemas": [{ + "nameOfSchema": { + "tables": { + "nameOfTable": { + "path": "/path/to/table" + } + } + } + }] +} +``` diff --git a/site/versioned_docs/version-4.1/custom-functions/create-project.md b/site/versioned_docs/version-4.1/custom-functions/create-project.md new file mode 100644 index 00000000..9e856975 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/create-project.md @@ -0,0 +1,40 @@ +--- +title: Create a Project +--- + +# Create a Project + +To create a project using our web-based GUI, HarperDB Studio, check out how to manage Custom Functions [here](../harperdb-studio/manage-functions). + +Otherwise, to create a project, you have the following options: + +1. **Use the add\_custom\_function\_project operation** + + This operation creates a new project folder, and populates it with templates for the routes, helpers, and static subfolders. + +```json +{ + "operation": "add_custom_function_project", + "project": "dogs" +} +``` + +1. **Clone our public GitHub project template** + + _This requires a local installation. Remove the .git directory for a clean slate of git history._ + +```bash +> git clone https://github.com/HarperDB/harperdb-custom-functions-template.git ~/hdb/custom_functions/dogs +``` + +1. **Create a project folder in your Custom Functions root directory** and **initialize** + + _This requires a local installation._ + +```bash +> mkdir ~/hdb/custom_functions/dogs +``` + +```bash +> npm init +``` diff --git a/site/versioned_docs/version-4.1/custom-functions/custom-functions-operations.md b/site/versioned_docs/version-4.1/custom-functions/custom-functions-operations.md new file mode 100644 index 00000000..490a730f --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/custom-functions-operations.md @@ -0,0 +1,47 @@ +--- +title: Custom Functions Operations +--- + +# Custom Functions Operations + +One way to manage Custom Functions is through [HarperDB Studio](../harperdb-studio/). It performs all the necessary operations automatically. To get started, navigate to your instance in HarperDB Studio and click the subnav link for “functions”. If you have not yet enabled Custom Functions, it will walk you through the process. Once configuration is complete, you can manage and deploy Custom Functions in minutes. + +HarperDB Studio manages your Custom Functions using nine HarperDB operations. You may view these operations within our [API Docs](https://api.harperdb.io/). A brief overview of each of the operations is below: + + + +* **custom_functions_status** + + Returns the state of the Custom Functions server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine. + +* **get_custom_functions** + + Returns an array of projects within the Custom Functions root project directory.
+ +Each project has details including each of the files in the **routes** and **helpers** directories, and the total file count in the **static** folder. + +* **get_custom_function** + + Returns the content of the specified file as text. HarperDB Studio uses this call to render the file content in its built-in code editor. + +* **set_custom_function** + + Updates the content of the specified file. HarperDB Studio uses this call to save any changes made through its built-in code editor. + +* **drop_custom_function** + + Deletes the specified file. + +* **add_custom_function_project** + + Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is available publicly, here: https://github.com/HarperDB/harperdb-custom-functions-template. + +* **drop_custom_function_project** + + Deletes the specified project folder and all of its contents. + +* **package_custom_function_project** + + Creates a .tar file of the specified project folder, then reads it into a base64-encoded string and returns that string to the user. + +* **deploy_custom_function_project** + + Takes the output of package_custom_function_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the Custom Functions root project directory. diff --git a/site/versioned_docs/version-4.1/custom-functions/debugging-custom-function.md b/site/versioned_docs/version-4.1/custom-functions/debugging-custom-function.md new file mode 100644 index 00000000..91d34bd6 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/debugging-custom-function.md @@ -0,0 +1,102 @@ +--- +title: Debugging a Custom Function +--- + +# Debugging a Custom Function + +HarperDB Custom Functions projects are managed by HarperDB’s process manager. As such, it may seem more difficult to debug Custom Functions than your standard project. The goal of this document is to provide best practices and recommendations for debugging your Custom Function. + + + +For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use HarperDB's logging facilities, so you aren't logging to the console. The [HarperDB Custom Functions template](https://github.com/HarperDB/harperdb-custom-functions-template) includes the HarperDB logger module in the primary function parameters with the name `logger`. This logger can be used to output messages directly to the HarperDB log using standardized logging level functions, described below. The log level can be set in the [HarperDB Configuration File](../configuration). + +HarperDB Logger Functions +* `trace(message)`: Write a 'trace' level log, if the configured level allows for it. +* `debug(message)`: Write a 'debug' level log, if the configured level allows for it. +* `info(message)`: Write an 'info' level log, if the configured level allows for it. +* `warn(message)`: Write a 'warn' level log, if the configured level allows for it. +* `error(message)`: Write an 'error' level log, if the configured level allows for it. +* `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it. +* `notify(message)`: Write a 'notify' level log. + + +For debugging purposes, it is recommended to use `notify`, as these messages will appear in the log regardless of the configured log level.
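+ +As a quick illustration, a route handler might use `notify` so its message always reaches the log (a minimal sketch; the `/health` route and its response are hypothetical): + +```javascript +module.exports = async (server, { hdbCore, logger }) => { + server.route({ + url: '/health', + method: 'GET', + handler: async () => { + // notify-level messages are logged regardless of the configured level + logger.notify('health check endpoint called'); + return { status: 'ok' }; + } + }); +}; +``` +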
+ +## Viewing the Log + +The HarperDB Log can be found on the [Studio Status page](../harperdb-studio/instance-metrics) or in the local Custom Functions log file, `<ROOTPATH>/log/custom_functions.log`. Additionally, you can use the [`read_log` operation](https://api.harperdb.io/#7f718dd1-afa5-49ce-bc0c-564e17b1c9cf) to query the HarperDB log. + +### Example 1: Execute Query and Log Results + +This example performs a SQL query in HarperDB and logs the result. This example utilizes the `logger.notify` function to log the stringified version of the result. If an error occurs, it will output the error using `logger.error` and return the error. + + + +```javascript +server.route({ + url: '/', + method: 'GET', + handler: async (request) => { + request.body = { + operation: 'sql', + sql: 'SELECT * FROM dev.dog ORDER BY dog_name' + }; + + try { + let result = await hdbCore.requestWithoutAuthentication(request); + logger.notify(`Query Result: ${JSON.stringify(result)}`); + return result; + } catch (e) { + logger.error(`Query Error: ${e}`); + return e; + } + } +}); +``` + +### Example 2: Execute Multiple Queries and Log Activity + +This example performs two SQL queries in HarperDB with logging throughout to describe what is happening. This example utilizes the `logger.notify` function to log the stringified version of the operation and the result of each query. If an error occurs, it will output the error using `logger.error` and return the error. + + +```javascript +server.route({ + url: '/example', + method: 'GET', + handler: async (request) => { + logger.notify('/example called!'); + const results = []; + + request.body = { + operation: 'sql', + sql: 'SELECT * FROM dev.dog WHERE id = 1' + }; + logger.notify(`Query 1 Operation: ${JSON.stringify(request.body)}`); + try { + let result = await hdbCore.requestWithoutAuthentication(request); + logger.notify(`Query 1: ${JSON.stringify(result)}`); + results.push(result); + } catch (e) { + logger.error(`Query 1: ${e}`); + return e; + } + + request.body = { + operation: 'sql', + sql: 'SELECT * FROM dev.dog WHERE id = 2' + }; + logger.notify(`Query 2 Operation: ${JSON.stringify(request.body)}`); + try { + let result = await hdbCore.requestWithoutAuthentication(request); + logger.notify(`Query 2: ${JSON.stringify(result)}`); + results.push(result); + } catch (e) { + logger.error(`Query 2: ${e}`); + return e; + } + + logger.notify('/example complete!'); + return results; + } +}); +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/define-helpers.md b/site/versioned_docs/version-4.1/custom-functions/define-helpers.md new file mode 100644 index 00000000..eccd9b6a --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/define-helpers.md @@ -0,0 +1,36 @@ +--- +title: Define Helpers +--- + +# Define Helpers + +Helpers are functions for use within your routes. You may want to use the same helper in multiple route files, so this allows you to write it once, and include it wherever you need it. + + + +* To use your helpers, they must be exported from your helper file. Please use any standard export mechanism available for your module system. Our examples use CommonJS modules, so the example below exports using `module.exports`. + +* You must import the helper module into the file that needs access to the exported functions. With CommonJS, you'd use a `require` statement. See [this example](./define-routes#custom-prevalidation-hooks) in Define Routes.
+ + +Below is code from the customValidation helper that is referenced in [Define Routes](./define-routes). It takes the request and the logger method from the route declaration, and makes a call to an external API to validate the headers using fetch. The API in this example is just returning a list of ToDos, but it could easily be replaced with a call to a real authentication service. + + +```javascript +const customValidation = async (request, logger) => { + let response = await fetch('https://jsonplaceholder.typicode.com/todos/1', { headers: { authorization: request.headers.authorization } }); + let result = await response.json(); + + /* + * throw an authentication error based on the response body or statusCode + */ + if (result.error) { + const errorString = result.error || 'Sorry, there was an error authenticating your request'; + logger.error(errorString); + throw new Error(errorString); + } + return request; +}; + +module.exports = customValidation; +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/define-routes.md b/site/versioned_docs/version-4.1/custom-functions/define-routes.md new file mode 100644 index 00000000..84cef1da --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/define-routes.md @@ -0,0 +1,131 @@ +--- +title: Define Routes +--- + +# Define Routes + +HarperDB’s Custom Functions feature is built on top of [Fastify](https://www.fastify.io/), so our route definitions follow its specifications. Below is a very simple example of a route declaration. + + + +Route URLs are resolved in the following manner: + +* [**Instance URL**]:[**Custom Functions Port**]/[**Project Name**]/[**Route URL**] + +* The route below, within the **dogs** project, with a route of **breeds** would be available at **http://localhost:9926/dogs/breeds**. + + +In effect, this route is just a pass-through to HarperDB. The same result could have been achieved by hitting the core HarperDB API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below. + + + +```javascript +module.exports = async (server, { hdbCore, logger }) => { + server.route({ + url: '/', + method: 'POST', + preValidation: hdbCore.preValidation, + handler: hdbCore.request, + }) +} +``` + + +## Custom Handlers + +For endpoints where you want to execute multiple operations against HarperDB, or perform additional processing (like an ML classification, or an aggregation, or a call to a 3rd party API), you can define your own logic in the handler. The function below will execute a query against the dogs table, and filter the results to only return those dogs over 4 years of age. + + + +**IMPORTANT: This route has NO preValidation and uses hdbCore.requestWithoutAuthentication, which, as the name implies, bypasses all user authentication. See the security concerns and mitigations in the “helper methods” section, below.** + + + +```javascript +module.exports = async (server, { hdbCore, logger }) => { + server.route({ + url: '/:id', + method: 'GET', + handler: async (request) => { + // user-submitted values like request.params.id should be escaped; see the precautions in the helper methods section + request.body = { + operation: 'sql', + sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}` + }; + + const result = await hdbCore.requestWithoutAuthentication(request); + return result.filter((dog) => dog.age > 4); + } + }); +} +``` + +## Custom preValidation Hooks +The simple example above was just a pass-through to HarperDB; the exact same result could have been achieved by hitting the core HarperDB API.
But for many applications, you may want to authenticate the user using custom logic you write, or by conferring with a 3rd party service. Custom preValidation hooks let you do just that. + + + +Below is an example of a route that uses a custom validation hook: + +```javascript +const customValidation = require('../helpers/customValidation'); + +module.exports = async (server, { hdbCore, logger }) => { + server.route({ + url: '/:id', + method: 'GET', + preValidation: (request) => customValidation(request, logger), + handler: (request) => { + request.body = { + operation: 'sql', + sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}` + }; + + return hdbCore.requestWithoutAuthentication(request); + } + }); +} +``` + + +Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Define Helpers](./define-helpers). + +## Helper Methods +When declaring routes, you are given access to two helper methods: hdbCore and logger. + + + +**hdbCore** + +hdbCore contains three functions that allow you to authenticate an inbound request, and execute operations against HarperDB directly, bypassing the standard Operations API. + + + +* **preValidation** + + This takes the authorization header from the inbound request and executes the same authentication as the standard HarperDB Operations API. It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**. + +* **request** + + This will execute a request with HarperDB using the operations API. The `request.body` should contain a standard HarperDB operation and must also include the `hdb_user` property that preValidation added to `request.body`. + +* **requestWithoutAuthentication** + + Executes a request against HarperDB without any security checks around whether the inbound user is allowed to make this request. For security purposes, you should always take the following precautions when using this method: + + * Properly handle user-submitted values, including URL params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL. + + +**logger** + +This helper allows you to write directly to the Custom Functions log file, custom_functions.log. It’s useful for debugging during development, although you may also use the console logger. There are five functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.
+ + +* logger.trace('Starting the handler for /dogs') + +* logger.debug('This should only fire once') + +* logger.warn('This should never ever fire') + +* logger.error('This did not go well') + +* logger.fatal('This did not go very well at all') \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/example-projects.md b/site/versioned_docs/version-4.1/custom-functions/example-projects.md new file mode 100644 index 00000000..88ded5fd --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/example-projects.md @@ -0,0 +1,37 @@ +--- +title: Example Projects +--- + +# Example Projects + +**Library of example projects and tutorials using Custom Functions:** + +* [Authorization in HarperDB using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang + +* [How to Speed Up your Applications by Caching at the Edge with HarperDB](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams + +* [OAuth Authentication in HarperDB using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos + +* [How To Create a CRUD API with Next.js & HarperDB Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock + +* [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush + +* [How to use HarperDB Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden + +* [Using TensorFlowJS & HarperDB Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft + +* [Build & Deploy a Fitness App with Python & HarperDB](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber + +* [Create a Discord Slash Bot using HarperDB Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty + +* [How I used HarperDB Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj + +* [How I used HarperDB Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary + +* [How To Use HarperDB Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi + +* [Build a Web App Using HarperDB’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp + +* [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David + +* [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select* Podcast \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/host-static.md b/site/versioned_docs/version-4.1/custom-functions/host-static.md new file mode 100644 index 00000000..0dcd2788 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/host-static.md @@ -0,0 +1,21 @@ +--- +title: Host A Static Web UI +--- + +# Host A Static Web UI + +The
[@fastify/static](https://github.com/fastify/fastify-static) module can be utilized to serve static files. + +Install the module in your project by running `npm i @fastify/static` from inside your project directory. + +Register `@fastify/static` with the server and set `root` to the absolute path of the directory that contains the static files to serve. + +For further information on how to send specific files see the [@fastify/static](https://github.com/fastify/fastify-static) docs. + +```javascript +// the Node core 'path' module is required to build the absolute root path +const path = require('path'); + +module.exports = async (server, { hdbCore, logger }) => { + server.register(require('@fastify/static'), { + root: path.join(__dirname, 'public'), + }) +}; +``` diff --git a/site/versioned_docs/version-4.1/custom-functions/index.md b/site/versioned_docs/version-4.1/custom-functions/index.md new file mode 100644 index 00000000..4b97f156 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/index.md @@ -0,0 +1,28 @@ +--- +title: Custom Functions +--- + +# Custom Functions + +Custom functions are a key part of building a complete HarperDB application. It is highly recommended that you use Custom Functions as the primary mechanism for your application to access your HarperDB database. Using Custom Functions gives you complete control over the accessible endpoints, how users are authenticated and authorized, what data is accessed from the database, and how it is aggregated and returned to users. + +* Add your own API endpoints to a standalone API server inside HarperDB + +* Use HarperDB Core methods to interact with your data at lightning speed + +* Custom Functions are powered by Fastify, so they’re extremely flexible + +* Manage in HarperDB Studio, or use your own IDE and Version Management System + +* Distribute your Custom Functions to all your HarperDB instances with a single click + +--- +* [Requirements and Definitions](./requirements-definitions) + +* [Create A Project](./create-project) + +* [Define Routes](./define-routes) + +* [Define Helpers](./define-helpers) + +* [Host a Static UI](./host-static) \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/requirements-definitions.md b/site/versioned_docs/version-4.1/custom-functions/requirements-definitions.md new file mode 100644 index 00000000..a38a0ec6 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/requirements-definitions.md @@ -0,0 +1,77 @@ +--- +title: Requirements And Definitions +--- + +# Requirements And Definitions +Before you get started with Custom Functions, here’s a primer on the basic configuration and the structure of a Custom Functions Project. + +## Configuration +Custom Functions are configured in the harperdb-config.yaml file located in the operations API root directory (by default this is a directory named `hdb` located in the home directory of the current user). Below is a view of the Custom Functions section of the config YAML file, plus descriptions of important Custom Functions settings. + +```yaml +customFunctions: + enabled: true + network: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + https: false + keepAliveTimeout: 5000 + port: 9926 + timeout: 120000 + nodeEnv: production + root: ~/hdb/custom_functions + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +* **`enabled`** + A boolean value that tells HarperDB to start the Custom Functions server. Set it to **true** to enable custom functions and **false** to disable.
`enabled` is `true` by default. + +* **`network.port`** + This is the port HarperDB will use to start a standalone Fastify Server dedicated to serving your Custom Functions’ routes. + +* **`root`** + This is the root directory where your Custom Functions projects and their files will live. By default, it’s in your HarperDB root directory (`<ROOTPATH>/custom_functions`), but you can locate it anywhere, for example in a developer folder next to your other development projects. + +_Please visit our [configuration docs](../configuration) for a more comprehensive look at these settings._ + +## Project Structure +**project folder** + +The name of the folder that holds your project files serves as the root prefix for all the routes you create. All routes created in the **dogs** project folder will have a URL like this: **https://my-server-url.com:9926/dogs/my/route**. As such, it’s important that any project folders you create avoid any characters that aren’t URL-friendly. You should avoid URL delimiters in your folder names. + + +**/routes folder** + +Files in the **routes** folder define the requests that your Custom Functions server will handle. They are [standard Fastify route declarations](https://www.fastify.io/docs/latest/Reference/Routes/), so if you’re familiar with them, you should be up and running in no time. The default components for a route are the url, method, preValidation, and handler. + +```javascript +module.exports = async (server, { hdbCore, logger }) => { + server.route({ + url: '/', + method: 'POST', + preValidation: hdbCore.preValidation, + handler: hdbCore.request, + }); +} +``` + +**/helpers folder** + +These files are JavaScript modules that you can use in your handlers, or for custom `preValidation` hooks. Examples include calls to third party Authentication services, filters for results of calls to HarperDB, and custom error responses. As modules, you can use standard import and export functionality. + +```javascript +"use strict"; + +const dbFilter = (databaseResultsArray) => databaseResultsArray.filter((result) => result.showToApi === true); + +module.exports = dbFilter; +``` + +**/static folder** + +If you’d like to serve your visitors a static website, you can place the html and supporting files into a directory called **static**. The directory must have an **index.html** file, and can have as many supporting resources as are necessary in whatever subfolder structure you prefer within that **static** directory. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/restarting-server.md b/site/versioned_docs/version-4.1/custom-functions/restarting-server.md new file mode 100644 index 00000000..b8352059 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/restarting-server.md @@ -0,0 +1,18 @@ +--- +title: Restarting the Server +--- + +# Restarting the Server + +One way to manage Custom Functions is through [HarperDB Studio](../harperdb-studio/). It performs all the necessary operations automatically. To get started, navigate to your instance in HarperDB Studio and click the subnav link for “functions”. If you have not yet enabled Custom Functions, it will walk you through the process. Once configuration is complete, you can manage and deploy Custom Functions in minutes. + +For any changes made to your routes, helpers, or projects, you’ll need to restart the Custom Functions server to see them take effect. HarperDB Studio does this automatically whenever you create or delete a project, or add, edit, or delete a route or helper.
If you need to start the Custom Functions server yourself, you can use the following operation to do so: + + + +```json +{ + "operation": "restart_service", + "service": "custom_functions" +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/templates.md b/site/versioned_docs/version-4.1/custom-functions/templates.md new file mode 100644 index 00000000..0fb6401e --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/templates.md @@ -0,0 +1,7 @@ +--- +title: Templates +--- + +# Templates + +Check out our always-expanding library of templates in our open-source [HarperDB-Add-Ons GitHub repo](https://github.com/HarperDB-Add-Ons). \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/custom-functions/using-npm-git.md b/site/versioned_docs/version-4.1/custom-functions/using-npm-git.md new file mode 100644 index 00000000..4120fd17 --- /dev/null +++ b/site/versioned_docs/version-4.1/custom-functions/using-npm-git.md @@ -0,0 +1,13 @@ +--- +title: Using NPM and Git +--- + +# Using NPM and Git + +Custom function projects can be structured and managed like normal Node.js projects. You can include external dependencies, require them in your route and helper files, and manage your revisions without changing your development tooling or pipeline. + + + +* To initialize your project to use npm packages, use the terminal to execute `npm init` from the root of your project folder. + +* To implement version control using git, use the terminal to execute `git init` from the root of your project folder. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/getting-started/getting-started.md b/site/versioned_docs/version-4.1/getting-started/getting-started.md new file mode 100644 index 00000000..0d1db68f --- /dev/null +++ b/site/versioned_docs/version-4.1/getting-started/getting-started.md @@ -0,0 +1,54 @@ +--- +title: Getting Started +--- + +# Getting Started + +Getting started with HarperDB is easy and fast. + +The quickest way to get up and running with HarperDB is with HarperDB Cloud, our database-as-a-service offering, which this guide will utilize. + +### Set Up a HarperDB Instance + +Before you can start using HarperDB you need to set up an instance. Note, if you would prefer to install HarperDB locally, [check out the installation guides including Linux, Mac, and many other options](../install-harperdb/). + +1. [Sign up for the HarperDB Studio](https://studio.harperdb.io/sign-up) +1. [Create a new HarperDB Cloud instance](../harperdb-studio/instances#create-a-new-instance) + +> HarperDB Cloud instance provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. + +### Using the HarperDB Studio + +Now that you have a HarperDB instance, you can do pretty much everything you’d like through the Studio. This section links to appropriate articles to get you started interacting with your data. + +1. [Create a schema](../harperdb-studio/manage-schemas-browse-data#create-a-schema) +1. [Create a table](../harperdb-studio/manage-schemas-browse-data#create-a-table) +1. [Add a record](../harperdb-studio/manage-schemas-browse-data#add-a-record) +1. [Load CSV data](../harperdb-studio/manage-schemas-browse-data#load-csv-data) (Here’s a sample CSV of the HarperDB team’s dogs) +1. [Query data via SQL](../harperdb-studio/query-instance-data) + +### Using the HarperDB API + +Complete HarperDB API documentation is available at api.harperdb.io.
The HarperDB Studio features an example code builder that generates API calls in the programming language of your choice. For example purposes, a basic cURL command is shown below to create a schema called dev. + +``` +curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \ +--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \ +--header 'Content-Type: application/json' \ +--data-raw '{ +"operation": "create_schema", +"schema": "dev" +}' +``` + +Breaking it down, there are only a few requirements for interacting with HarperDB: + +* Using the HTTP POST method. +* Providing the URL of the HarperDB instance. +* Providing the Authorization header (more on using Basic authentication). +* Providing the Content-Type header. +* Providing a JSON body with the desired operation and any additional operation properties (shown in the --data-raw parameter). This is the only parameter that needs to be changed to execute alternative operations on HarperDB. + +### Video Tutorials + +[HarperDB video tutorials are available within the HarperDB Studio](../harperdb-studio/resources#video-tutorials). HarperDB and the HarperDB Studio are constantly changing; as such, there may be small discrepancies in UI/UX. diff --git a/site/versioned_docs/version-4.1/harperdb-cli.md b/site/versioned_docs/version-4.1/harperdb-cli.md new file mode 100644 index 00000000..b7c1f9e0 --- /dev/null +++ b/site/versioned_docs/version-4.1/harperdb-cli.md @@ -0,0 +1,114 @@ +--- +title: HarperDB CLI +--- + +# HarperDB CLI + +The HarperDB command line interface (CLI) is used to administer [self-installed HarperDB instances](./install-harperdb/). + +## Installing HarperDB + +To install HarperDB with CLI prompts, run the following command: + +```bash +harperdb install +``` + +Alternatively, HarperDB installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](./configuration#using-the-configuration-file-and-naming-conventions). Note, when used in conjunction, command line arguments will override environment variables.
+ +#### Environment Variables + +```bash +#minimum required parameters for no additional CLI prompts +export TC_AGREEMENT=yes +export HDB_ADMIN_USERNAME=HDB_ADMIN +export HDB_ADMIN_PASSWORD=password +export ROOTPATH=/tmp/hdb/ +export OPERATIONSAPI_NETWORK_PORT=9925 +harperdb install +``` + +#### Command Line Arguments + +```bash +#minimum required parameters for no additional CLI prompts +harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925 +``` + +*** + +## Starting HarperDB + +To start HarperDB after it is installed, run the following command: + +```bash +harperdb start +``` + +*** + +## Stopping HarperDB + +To stop HarperDB once it is running, run the following command: + +```bash +harperdb stop +``` + +*** + +## Restarting HarperDB + +To restart HarperDB once it is running, run the following command: + +```bash +harperdb restart +``` + +*** + +## Managing HarperDB Service(s) + +The following commands are used to start, restart, or stop one or more HarperDB services without restarting the full application: + +```bash +harperdb start --service harperdb,"custom functions",ipc +harperdb stop --service harperdb +harperdb restart --service "custom functions" +``` + +The following services are managed via the above commands: + +* HarperDB +* Custom Functions +* IPC +* Clustering + +*** + +## Getting the HarperDB Version + +To check the version of HarperDB that is installed, run the following command: + +```bash +harperdb version +``` + +## Get all available CLI commands + +To display all available HarperDB CLI commands along with a brief description, run: + +```bash +harperdb help +``` + +## Get the status of HarperDB and clustering + +To display the status of the HarperDB process, the clustering hub and leaf processes, the clustering network and replication statuses, run: + +```bash +harperdb status +``` + +## Backups +HarperDB uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that HarperDB maintains safety of database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a HarperDB database. Database files are stored in the hdb/schemas directory (organized into schema directories). As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the schemas directory to restore a previous backup (with HarperDB shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot, and this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/harperdb-cloud/alarms.md b/site/versioned_docs/version-4.1/harperdb-cloud/alarms.md new file mode 100644 index 00000000..26f28a24 --- /dev/null +++ b/site/versioned_docs/version-4.1/harperdb-cloud/alarms.md @@ -0,0 +1,27 @@ +--- +title: HarperDB Cloud Alarms +--- + +# HarperDB Cloud Alarms + +HarperDB Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered, organization owners will immediately receive an email alert and the alert will be available on the [Instance Configuration](../harperdb-studio/instance-configuration) page.
The table below describes each alarm and its evaluation metrics. + + + +### Heading Definitions + +* **Alarm**: Title of the alarm. + +* **Threshold**: Definition of the alarm threshold. + +* **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over. + +* **Proposed Remedy**: Recommended solution to avoid the alert in the future. + + +| Alarm | Threshold | Intervals | Proposed Remedy | +|---------|------------|-----------|----------------------------------------------------------------------------------------------------------------| +| Storage | > 90% Disk | 1 x 5min | [Increase storage volume](../harperdb-studio/instance-configuration#update-instance-storage) | +| CPU | > 90% Avg | 2 x 5min | [Increase instance size for additional CPUs](../harperdb-studio/instance-configuration#update-instance-ram) | +| Memory | > 90% RAM | 2 x 5min | [Increase instance size](../harperdb-studio/instance-configuration#update-instance-ram) | + diff --git a/site/versioned_docs/version-4.1/harperdb-cloud/index.md b/site/versioned_docs/version-4.1/harperdb-cloud/index.md new file mode 100644 index 00000000..d820c858 --- /dev/null +++ b/site/versioned_docs/version-4.1/harperdb-cloud/index.md @@ -0,0 +1,7 @@ +--- +title: HarperDB Cloud +--- + +# HarperDB Cloud + +HarperDB Cloud is the easiest way to test drive HarperDB; it’s HarperDB-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. HarperDB Cloud is currently powered by AWS with additional cloud providers on our roadmap for the future. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/harperdb-cloud/instance-size-hardware-specs.md b/site/versioned_docs/version-4.1/harperdb-cloud/instance-size-hardware-specs.md new file mode 100644 index 00000000..74dca186 --- /dev/null +++ b/site/versioned_docs/version-4.1/harperdb-cloud/instance-size-hardware-specs.md @@ -0,0 +1,26 @@ +--- +title: HarperDB Cloud Instance Size Hardware Specs +--- + +# HarperDB Cloud Instance Size Hardware Specs + +While HarperDB Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail*. + +| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor | +|------------------------|------------|----------|-----------------|----------------------------------------| +| t3.nano | 0.5 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.micro | 1 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.small | 2 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.medium | 4 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| m5.large | 8 | 2 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.xlarge | 16 | 4 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.2xlarge | 32 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.4xlarge | 64 | 16 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.8xlarge | 128 | 32 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.12xlarge | 192 | 48 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.16xlarge | 256 | 64 | 20 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.24xlarge | 384 | 96 | 25 | Up to 3.1 GHz Intel Xeon Platinum 8000 | + + + +*Specifications are subject to change. For the most up-to-date information, please refer to AWS documentation: https://aws.amazon.com/ec2/instance-types/.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.1/harperdb-cloud/iops-impact.md b/site/versioned_docs/version-4.1/harperdb-cloud/iops-impact.md new file mode 100644 index 00000000..10baf28c --- /dev/null +++ b/site/versioned_docs/version-4.1/harperdb-cloud/iops-impact.md @@ -0,0 +1,49 @@ +--- +title: IOPS Impact on Performance +--- + +# IOPS Impact on Performance + +HarperDB, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of a server, virtual machine, or container running HarperDB. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that HarperDB performs as expected. + +## IOPS Overview +The primary measure of storage performance is the number of input/output operations per second (IOPS) that a storage device can perform. Different storage devices can have dramatically different performance profiles. A hard drive (HDD) might only perform a hundred or so IOPS, while a solid state drive (SSD) might be able to perform tens or hundreds of thousands of IOPS. + + + +Cloud providers like AWS, which powers HarperDB Cloud, don’t typically attach individual disks to a virtual machine or container. Instead, they combine large numbers of storage drives to create very high performance storage servers. Chunks (volumes) of that storage are then carved out and presented to many different virtual machines and containers. Due to the shared nature of this type of storage, the cloud provider places configurable limits on the number of IOPS that a volume can perform. The same way that cloud providers charge more for larger capacity volumes, they also charge more for volumes with more IOPS. + +## HarperDB Cloud Storage + +HarperDB Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price. + + + +AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS; as a result, all HarperDB Cloud storage options will offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future. + + + +You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html. + +## Estimating IOPS for a HarperDB Instance + +The number of IOPS required for a particular workload is influenced by many factors. Testing your particular application is the best way to determine the number of IOPS required. A reliable method is to estimate about two IOPS for every index, including the primary key itself. So if a table has two indices besides the primary key, estimate that an insert or update will require about six IOPS. Note that this can often be closer to one IOPS per index under load due to internal batching of writes, and sometimes even better when doing sequential inserts. Again, it is best to verify this by testing with application-specific data and write patterns. + + + +For assistance in estimating IOPS requirements, feel free to contact HarperDB Support or join our Community Slack Channel. + +## Example Use Case IOPS Requirements + +* **Sensor Data Collection** + + In the case of IoT sensors, where data collection is sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level.
+## Example Use Case IOPS Requirements
+
+* **Sensor Data Collection**
+
+  In the case of IoT sensors, where data collection is sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested, which implies that IOPS will be sustained at a high level. For example, if you are collecting 100 records per second, you would expect to need roughly 3,000 IOPS just to handle the data inserts.
+* **Data Analytics/BI Server**
+
+  Providing a server for analytics purposes typically requires a larger machine. Typically these cases involve large scale SQL joins and aggregations, which put a large strain on reads. HarperDB utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs high IOPS to meet performance demands.
+* **Web Services**
+
+  Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without high-performance load requirements. A good rule to follow is that any HarperDB operation that requires a data scan will be IOPS intensive, but if these are infrequent then the EBS baseline will suffice. Queries utilizing equals operations in either SQL or NoSQL do not require a scan due to HarperDB's native indexing.
+* **High Performance Database**
+
+  Ultimately, if performance is your top priority, HarperDB should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-cloud/verizon-5g-wavelength-instances.md b/site/versioned_docs/version-4.1/harperdb-cloud/verizon-5g-wavelength-instances.md
new file mode 100644
index 00000000..1aaa838d
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-cloud/verizon-5g-wavelength-instances.md
@@ -0,0 +1,46 @@
+---
+title: Verizon 5G Wavelength Instances
+---
+
+# Verizon 5G Wavelength Instances
+
+These instances are only accessible from the Verizon network. When accessing your HarperDB instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, and Verizon mobile devices.
+
+
+
+HarperDB on Verizon 5G Wavelength brings HarperDB closer to the end user, exclusively on the Verizon network, resulting in as little as single-digit millisecond response times from HarperDB to the client.
+
+
+
+Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/).
+
+## HarperDB 5G Wavelength Instance Specs
+While HarperDB 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail*.
+
+| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor |
+|-----------------------|-----------|---------|----------------|-----------|
+| t3.medium | 4 | 2 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor |
+| t3.xlarge | 16 | 4 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor |
+| r5.2xlarge | 64 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum Processor |
+
+
+
+
+
+*Specifications are subject to change. For the most up to date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/).
+
+## HarperDB 5G Wavelength Storage
+
+HarperDB 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp2) volumes.
This is the most common storage type used in AWS, as it provides reasonable performance for most workloads at a reasonable price.
+
+
+
+AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS they can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time.
+
+
+
+Smaller gp2 volumes are perfect for trying out the functionality of HarperDB, and might also work well for applications that don't perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger HarperDB volume. Learn more about the impact of IOPS on performance here.
+
+
+
+You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#ebsvolumetypes_gp2).
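+Per AWS's published gp2 formula, baseline IOPS scale at 3 IOPS per provisioned GiB, with a floor of 100 IOPS and a cap of 16,000, and volumes under 1 TiB can burst to 3,000 IOPS while burst credits last. A quick sketch of that arithmetic:
+
+```bash
+# gp2 baseline: 3 IOPS per GiB, minimum 100, maximum 16,000 (per AWS docs).
+SIZE_GIB=250
+BASELINE=$((SIZE_GIB * 3))
+[ "$BASELINE" -lt 100 ] && BASELINE=100
+[ "$BASELINE" -gt 16000 ] && BASELINE=16000
+echo "A ${SIZE_GIB} GiB gp2 volume sustains ~${BASELINE} IOPS (bursts to 3,000 while credits last)"
+```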
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/create-account.md b/site/versioned_docs/version-4.1/harperdb-studio/create-account.md
new file mode 100644
index 00000000..635de7f4
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/create-account.md
@@ -0,0 +1,26 @@
+---
+title: Create a Studio Account
+---
+
+# Create a Studio Account
+Start at the [HarperDB Studio sign up page](https://studio.harperdb.io/sign-up).
+
+1) Provide the following information:
+   * First Name
+   * Last Name
+   * Email Address
+   * Subdomain
+
+     *Part of the URL that will be used to identify your HarperDB Cloud Instances. For example, with subdomain "demo" and instance name "c1" the instance URL would be: https://c1-demo.harperdbcloud.com.*
+   * Coupon Code (optional)
+2) Review the Privacy Policy and Terms of Service.
+3) Click the **sign up for free** button.
+4) You will be taken to a new screen to add an account password. Enter your password.
+   *Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.*
+5) Click the **add account password** button.
+
+You will receive a Studio welcome email confirming your registration.
+
+
+
+Note: Your email address will be used as your username and cannot be changed.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/enable-mixed-content.md b/site/versioned_docs/version-4.1/harperdb-studio/enable-mixed-content.md
new file mode 100644
index 00000000..1948d6be
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/enable-mixed-content.md
@@ -0,0 +1,11 @@
+---
+title: Enable Mixed Content
+---
+
+# Enable Mixed Content
+
+Enabling mixed content is required in cases where you would like to connect the HarperDB Studio to HarperDB Instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so allows your browser to load HTTP traffic, which is considered insecure, from an HTTPS site like the Studio.
+
+
+
+A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html).
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/index.md b/site/versioned_docs/version-4.1/harperdb-studio/index.md
new file mode 100644
index 00000000..93ba1af7
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/index.md
@@ -0,0 +1,15 @@
+---
+title: HarperDB Studio
+---
+
+# HarperDB Studio
+HarperDB Studio is the web-based GUI for HarperDB. Studio enables you to administer, navigate, and monitor all of your HarperDB instances in a simple, user friendly interface without any knowledge of the underlying HarperDB API. It's free to sign up, so get started today!
+
+[Sign up for free!](https://studio.harperdb.io/sign-up)
+
+---
+## How does Studio Work?
+While HarperDB Studio is web based and hosted by us, all database interactions are performed on the HarperDB instance the Studio is connected to. The HarperDB Studio loads in your browser, at which point you log in to your HarperDB instances. Credentials are stored in your browser cache and are not transmitted back to HarperDB. All database interactions are made via the HarperDB Operations API directly from your browser to your instance.
+
+## What type of instances can I manage?
+HarperDB Studio enables users to manage both HarperDB Cloud instances and privately hosted instances, all from a single UI. All HarperDB instances feature identical behavior whether they are hosted by us or by you.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/instance-configuration.md b/site/versioned_docs/version-4.1/harperdb-studio/instance-configuration.md
new file mode 100644
index 00000000..55c01be1
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/instance-configuration.md
@@ -0,0 +1,119 @@
+---
+title: Instance Configuration
+---
+
+# Instance Configuration
+
+HarperDB instance configuration can be viewed and managed directly through the HarperDB Studio. HarperDB Cloud instances can be resized in two ways from this page: by changing machine RAM or by increasing drive storage. User-installed instances can have their licenses updated by changing the licensed RAM.
+
+
+
+All instance configuration is handled through the **config** page of the HarperDB Studio, accessed with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **config** in the instance control bar.
+
+*Note, the **config** page will only be available to super users, and certain items are restricted to Studio organization owners.*
+
+## Instance Overview
+
+The **instance overview** panel displays the following instance specifications:
+
+* Instance URL
+
+* Instance Node Name (for clustering)
+
+* Instance API Auth Header (this user)
+
+  *The Basic authentication header used for the logged-in HarperDB database user*
+
+* Created Date (HarperDB Cloud only)
+
+* Region (HarperDB Cloud only)
+
+  *The geographic region where the instance is hosted.*
+
+* Total Price
+
+* RAM
+
+* Storage (HarperDB Cloud only)
+
+* Disk IOPS (HarperDB Cloud only)
+
+## Update Instance RAM
+
+HarperDB Cloud instance size and user-installed instance licenses can be modified with the following instructions. This option is only available to Studio organization owners.
+
+
+
+Note: For HarperDB Cloud instances, upgrading RAM may add additional CPUs to your instance as well.
See the HarperDB Cloud Instance Size Hardware Specs page to see how many CPUs are provisioned for each instance size.
+
+1) In the **update ram** panel at the bottom left:
+
+   * Select the new instance size.
+
+   * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen, where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade.
+
+   * If you do have a credit card associated, you will be presented with the updated billing information.
+
+   * Click **Upgrade**.
+
+2) The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
+
+3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.
+
+*Note, if HarperDB Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.*
+
+## Update Instance Storage
+
+The HarperDB Cloud instance storage size can be increased with the following instructions. This option is only available to Studio organization owners.
+
+Note: Instance storage can only be upgraded once every 6 hours.
+
+1) In the **update storage** panel at the bottom left:
+
+   * Select the new instance storage size.
+
+   * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen, where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade.
+
+   * If you do have a credit card associated, you will be presented with the updated billing information.
+
+   * Click **Upgrade**.
+
+2) The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
+
+3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.
+
+*Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.*
+
+## Remove Instance
+
+The HarperDB instance can be deleted/removed from the Studio with the following instructions. Once this operation is started it cannot be undone. This option is only available to Studio organization owners.
+
+1) In the **remove instance** panel at the bottom left:
+   * Enter the instance name in the text box.
+
+   * The Studio will present you with a warning.
+
+   * Click **Remove**.
+
+2) The instance will begin deleting immediately.
+
+## Restart Instance
+
+The HarperDB Cloud instance can be restarted with the following instructions.
+
+1) In the **restart instance** panel at the bottom right:
+   * Enter the instance name in the text box.
+
+   * The Studio will present you with a warning.
+
+   * Click **Restart**.
+
+2) The instance will begin restarting immediately.
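+For user-installed instances, or for automation, the same restart can be triggered through the Operations API's `restart` operation; a sketch with a placeholder host and credentials:
+
+```bash
+# Restart HarperDB via the Operations API (placeholder host/credentials).
+curl -X POST https://harperdb.myhost.com:9925 \
+  -H 'Content-Type: application/json' \
+  -u 'HDB_ADMIN:password' \
+  -d '{"operation": "restart"}'
+```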
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/instance-example-code.md b/site/versioned_docs/version-4.1/harperdb-studio/instance-example-code.md
new file mode 100644
index 00000000..b4b74e5f
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/instance-example-code.md
@@ -0,0 +1,62 @@
+---
+title: Instance Example Code
+---
+
+# Instance Example Code
+
+Example code prepopulated with the instance URL and authorization token for the logged-in database user can be found on the **example code** page of the HarperDB Studio. Code samples are generated based on the HarperDB API Documentation Postman collection. Code samples can be accessed with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **example code** in the instance control bar.
+
+5) Select the appropriate **category** from the left navigation.
+
+6) Select the appropriate **operation** from the left navigation.
+
+7) Select your desired language/variant from the **Choose Programming Language** dropdown.
+
+8) Copy code from the sample code panel using the copy icon.
+
+## Supported Languages
+
+Sample code uses two identifiers: **language** and **variant**.
+
+* **language** is the programming language that the sample code is generated in.
+
+* **variant** is the methodology or library used by the language to send HarperDB requests.
+
+The available language/variant combinations are as follows:
+
+| Language | Variant |
+|--------------|---------------|
+| C# | RestSharp |
+| cURL | cURL |
+| Go | Native |
+| HTTP | HTTP |
+| Java | OkHttp |
+| Java | Unirest |
+| JavaScript | Fetch |
+| JavaScript | jQuery |
+| JavaScript | XHR |
+| NodeJs | Axios |
+| NodeJs | Native |
+| NodeJs | Request |
+| NodeJs | Unirest |
+| Objective-C | NSURLSession |
+| OCaml | Cohttp |
+| PHP | cURL |
+| PHP | HTTP_Request2 |
+| PowerShell | RestMethod |
+| Python | http.client |
+| Python | Requests |
+| Ruby | Net::HTTP |
+| Shell | Httpie |
+| Shell | wget |
+| Swift | URLSession |
+
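+For reference, a generated cURL sample looks roughly like the following; the URL and token are placeholders that the Studio fills in with your instance's real values:
+
+```bash
+curl --location --request POST 'https://c1-demo.harperdbcloud.com' \
+  --header 'Content-Type: application/json' \
+  --header 'Authorization: Basic YOUR_BASE64_TOKEN' \
+  --data-raw '{"operation": "describe_all"}'
+```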
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/instance-metrics.md b/site/versioned_docs/version-4.1/harperdb-studio/instance-metrics.md
new file mode 100644
index 00000000..b2bda847
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/instance-metrics.md
@@ -0,0 +1,19 @@
+---
+title: Instance Metrics
+---
+
+# Instance Metrics
+
+The HarperDB Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **status** in the instance control bar.
+
+Once on the instance status page you can view host system information, [HarperDB logs](../logging), and HarperDB Cloud alarms (if it is a cloud instance).
+
+*Note, the **status** page will only be available to super users.*
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/instances.md b/site/versioned_docs/version-4.1/harperdb-studio/instances.md
new file mode 100644
index 00000000..33e61ab6
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/instances.md
@@ -0,0 +1,144 @@
+---
+title: Instances
+---
+
+# Instances
+
+The HarperDB Studio allows you to administer all of your HarperDB instances in one place. HarperDB currently offers the following instance types:
+
+* **HarperDB Cloud Instance**
+Managed installations of HarperDB, what we call HarperDB Cloud.
+* **5G Wavelength Instance**
+Managed installations of HarperDB running on the Verizon network through AWS Wavelength, what we call 5G Wavelength Instances. *Note, these instances are only accessible via the Verizon network.*
+* **User-Installed Instance**
+Any HarperDB installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally.
+
+All interactions between the Studio and your instances take place directly from your browser. HarperDB stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the HarperDB instances using the standard [HarperDB API](https://api.harperdb.io/).
+
+## Organization Instance List
+A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets its own card. HarperDB Cloud and user-installed instances are listed together.
+
+## Create a New Instance
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization for the instance to be created under.
+3) Click the **Create New HarperDB Cloud Instance + Register User-Installed Instance** card.
+4) Select your desired Instance Type.
+5) For a HarperDB Cloud Instance or a HarperDB 5G Wavelength Instance, click **Create HarperDB Cloud Instance**.
+
+   1) Fill out Instance Info.
+      1) Enter Instance Name
+
+         *This will be used to build your instance URL. For example, with subdomain "demo" and instance name "c1" the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below.*
+
+      2) Enter Instance Username
+
+         *This is the username of the initial HarperDB instance super user.*
+
+      3) Enter Instance Password
+
+         *This is the password of the initial HarperDB instance super user.*
+
+   2) Click **Instance Details** to move to the next page.
+   3) Select Instance Specs
+
+      1) Select Instance RAM
+
+         *HarperDB Cloud Instances are billed based on Instance RAM; this selects the size of your provisioned instance. More on instance specs.*
+
+      2) Select Storage Size
+
+         *Each instance has a mounted storage volume where your HarperDB data will reside. Storage is provisioned based on space and IOPS. More on IOPS Impact on Performance.*
+
+      3) Select Instance Region
+
+         *The geographic area where your instance will be provisioned.*
+
+   4) Click **Confirm Instance Details** to move to the next page.
+   5) Review your Instance Details; if there is an error, use the back button to correct it.
+   6) Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm.
+   7) Click **Add Instance**.
+   8) Your HarperDB Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready.
+
+## Register User-Installed Instance
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization for the instance to be created under.
+3) Click the **Create New HarperDB Cloud Instance + Register User-Installed Instance** card.
+4) Select **Register User-Installed Instance**.
+   1) Fill out Instance Info.
+
+      1) Enter Instance Name
+
+         *This is used for descriptive purposes only.*
+      2) Enter Instance Username
+
+         *The username of a HarperDB super user that is already configured in your HarperDB installation.*
+      3) Enter Instance Password
+
+         *The password of a HarperDB super user that is already configured in your HarperDB installation.*
+      4) Enter Host
+
+         *The host to access the HarperDB instance. For example, `harperdb.myhost.com` or `localhost`.*
+      5) Enter Port
+
+         *The port to access the HarperDB instance. HarperDB defaults to `9925`.*
+      6) Select SSL
+
+         *If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message.*
+
+   2) Click **Instance Details** to move to the next page.
+   3) Select Instance Specs
+      1) Select Instance RAM
+
+         *HarperDB instances are billed based on Instance RAM. Selecting additional RAM enables faster and more complex queries.*
+   4) Click **Confirm Instance Details** to move to the next page.
+   5) Review your Instance Details; if there is an error, use the back button to correct it.
+   6) Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm.
+   7) Click **Add Instance**.
+   8) The HarperDB Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete.
+
+## Delete an Instance
+
+Instance deletion has two different behaviors depending on the instance type.
+
+* **HarperDB Cloud Instance**
+This instance will be permanently deleted, including all data. This process is irreversible and cannot be undone.
+* **User-Installed Instance**
+The instance will be removed from the HarperDB Studio only. This does not uninstall HarperDB from your system and your data will remain intact.
+
+An instance can be deleted as follows:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization that the instance belongs to.
+3) Identify the proper instance card and click the trash can icon.
+4) Enter the instance name into the text box.
+
+   *This is done for confirmation purposes to ensure you do not accidentally delete an instance.*
+5) Click the **Do It** button.
+
+## Upgrade an Instance
+
+HarperDB instances can be resized on the [Instance Configuration](./instance-configuration) page.
+
+## Instance Log In/Log Out
+
+The Studio enables you to log in to and out of instances as different database users from the instance control panel. To log out of an instance:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization that the instance belongs to.
+3) Identify the proper instance card and click the lock icon.
+4) You will immediately be logged out of the instance.
+
+To log in to an instance:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization that the instance belongs to.
+3) Identify the proper instance card (it will have an unlocked icon and a status reading PLEASE LOG IN) and click the center of the card.
+4) Enter the database username.
+
+   *The username of a HarperDB user that is already configured in your HarperDB instance.*
+5) Enter the database password.
+
+   *The password of a HarperDB user that is already configured in your HarperDB instance.*
+6) Click **Log In**.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/login-password-reset.md b/site/versioned_docs/version-4.1/harperdb-studio/login-password-reset.md
new file mode 100644
index 00000000..dddda5c1
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/login-password-reset.md
@@ -0,0 +1,42 @@
+---
+title: Login and Password Reset
+---
+
+# Login and Password Reset
+
+## Log In to Your HarperDB Studio Account
+
+To log into your existing HarperDB Studio account:
+
+1) Navigate to the [HarperDB Studio](https://studio.harperdb.io/).
+2) Enter your email address.
+3) Enter your password.
+4) Click **sign in**.
+
+## Reset a Forgotten Password
+
+To reset a forgotten password:
+
+1) Navigate to the HarperDB Studio password reset page.
+2) Enter your email address.
+3) Click **send password reset email**.
+4) If the account exists, you will receive an email with a temporary password.
+5) Navigate back to the HarperDB Studio login page.
+6) Enter your email address.
+7) Enter your temporary password.
+8) Click **sign in**.
+9) You will be taken to a new screen to reset your account password. Enter your new password.
+*Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.*
+10) Click the **add account password** button.
+
+## Change Your Password
+
+If you are already logged into the Studio, you can change your password through the user interface.
+
+1) Navigate to the HarperDB Studio profile page.
+2) In the **password** section, enter:
+
+   * Current password.
+   * New password.
+   * New password again *(for verification)*.
+3) Click the **Update Password** button.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/manage-charts.md b/site/versioned_docs/version-4.1/harperdb-studio/manage-charts.md
new file mode 100644
index 00000000..f96505f5
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/manage-charts.md
@@ -0,0 +1,79 @@
+---
+title: Charts
+---
+
+# Charts
+
+The HarperDB Studio includes a charting feature within an instance. Charts are generated in real time based on your existing data and automatically refreshed every 15 seconds. Instance charts can be accessed with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization that the instance belongs to.
+3) Select your desired instance.
+4) Click **charts** in the instance control bar.
+
+## Creating a New Chart
+
+Charts are generated from SQL queries, so to build a new chart you first need to build a query. Instructions as follows (starting on the charts page described above):
+
+1) Click **query** in the instance control bar.
+2) Enter the SQL query you would like to generate a chart from.
+
+   *For example, using the dog demo data from the API Docs, we can get the average dog age per owner with the following query: `SELECT AVG(age) as avg_age, owner_name FROM dev.dog GROUP BY owner_name`.*
+
+3) Click **Execute**.
+
+4) Click **create chart** at the top right of the results table.
+
+5) Configure your chart.
+
+   1) Choose chart type.
+
+      *HarperDB Studio offers many standard charting options like line, bar, etc.*
+
+   2) Choose a data column.
+
+      *This column will be used to plot the data point. Typically, this is the values being calculated in the `SELECT` statement. Depending on the chart type, you can select multiple data columns to display on a single chart.*
+   3) Depending on the chart type, you will need to select a grouping.
+
+      *This could be labeled as x-axis, label, etc. It will be used to group the data; typically this is what you used in your **GROUP BY** clause.*
+
+   4) Enter a chart name.
+
+      *Used for identification purposes and will be displayed at the top of the chart.*
+
+   5) Choose the visible to all org users toggle.
+
+      *Leaving this option off will limit chart visibility to just your HarperDB Studio user. Toggling it on will enable all users within this Organization to view this chart.*
+
+   6) Click **Add Chart**.
+
+   7) The chart will now be visible on the **charts** page.
+
+The example query above, configured as a bar chart, results in the following chart:
+
+![Average Age per Owner Example](/img/v4.1/ave-age-per-owner-ex.png)
+
+
+## Downloading Charts
+HarperDB Studio charts can be downloaded in SVG, PNG, and CSV format. Instructions as follows (starting on the charts page described above):
+
+1) Identify the chart you would like to export.
+2) Click the three bars icon.
+
+3) Select the appropriate download option.
+
+4) The Studio will generate the export and begin downloading immediately.
+
+## Delete a Chart
+
+Delete a chart as follows (starting on the charts page described above):
+
+1) Identify the chart you would like to delete.
+
+2) Click the X icon.
+
+3) Click the **confirm delete chart** button.
+
+4) The chart will be deleted.
+
+Deleting a chart that is visible to all Organization users will delete it for all users.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/manage-clustering.md b/site/versioned_docs/version-4.1/harperdb-studio/manage-clustering.md
new file mode 100644
index 00000000..7155249d
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/manage-clustering.md
@@ -0,0 +1,94 @@
+---
+title: Manage Clustering
+---
+
+# Manage Clustering
+
+HarperDB instance clustering and replication can be configured directly through the HarperDB Studio. It is recommended to read through the clustering documentation first to gain a strong understanding of HarperDB clustering behavior.
+
+
+
+All clustering configuration is handled through the **cluster** page of the HarperDB Studio, accessed with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **cluster** in the instance control bar.
+
+Note, the **cluster** page will only be available to super users.
+
+---
+## Initial Configuration
+
+HarperDB instances do not have clustering configured by default. The HarperDB Studio will walk you through the initial configuration. Upon entering the **cluster** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left, while actions are described in the middle of the screen.
+
+1) Create a cluster user; read more about this here: Clustering Users and Roles.
+   * Enter username.
+
+   * Enter password.
+
+   * Click **Create Cluster User**.
+
+2) Click **Set Cluster Node Name**.
+3) Click **Enable Instance Clustering**.
+
+At this point the Studio will restart your HarperDB Instance, which is required for the configuration changes to take effect.
+
+---
+
+## Manage Clustering
+Once initial clustering configuration is completed, you are presented with a clustering management screen with the following properties:
+
+* **connected instances**
+
+  Displays all instances within the Studio Organization that this instance manages a connection with.
+
+* **unconnected instances**
+
+  Displays all instances within the Studio Organization that this instance does not manage a connection with.
+
+* **unregistered instances**
+
+  Displays all instances outside of the Studio Organization that this instance manages a connection with.
+
+* **manage clustering**
+
+  Once instances are connected, this will display clustering management options for all connected instances and all schemas and tables.
+---
+
+## Connect an Instance
+
+HarperDB Instances can be clustered together with the following instructions.
+
+1) Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both.
+
+2) Identify the instance you would like to connect from the **unconnected instances** panel.
+
+3) Click the plus icon next to the appropriate instance.
+
+4) If configurations are correct, all schemas will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue.
+
+---
+
+## Disconnect an Instance
+
+HarperDB Instances can be disconnected with the following instructions.
+
+1) Identify the instance you would like to disconnect from the **connected instances** panel.
+
+2) Click the minus icon next to the appropriate instance.
+
+---
+
+## Manage Replication
+
+Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions:
+
+1) Identify the instance, schema, and table for replication to be configured.
+
+2) For publish, click the toggle switch in the **publish** column.
+
+3) For subscribe, click the toggle switch in the **subscribe** column.
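+Under the hood, these toggles manage per-table subscriptions through the HarperDB API's clustering operations. As a sketch (the node name, schema, table, and credentials are hypothetical, and the exact payload should be verified against the 4.1 operations API docs):
+
+```bash
+# Connect a node and both publish and subscribe the dev.dog table.
+curl -X POST https://harperdb.myhost.com:9925 \
+  -H 'Content-Type: application/json' \
+  -u 'cluster_user:password' \
+  -d '{
+    "operation": "add_node",
+    "node_name": "node2",
+    "subscriptions": [
+      {"schema": "dev", "table": "dog", "publish": true, "subscribe": true}
+    ]
+  }'
```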
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/manage-functions.md b/site/versioned_docs/version-4.1/harperdb-studio/manage-functions.md
new file mode 100644
index 00000000..3a74d7e5
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/manage-functions.md
@@ -0,0 +1,163 @@
+---
+title: Manage Functions
+---
+
+# Manage Functions
+
+HarperDB Custom Functions are enabled by default and can be configured further through the HarperDB Studio. It is recommended to read through the Custom Functions documentation first to gain a strong understanding of HarperDB Custom Functions behavior.
+
+
+
+All Custom Functions configuration is handled through the **functions** page of the HarperDB Studio, accessed with the following instructions:
+
+1) Navigate to the HarperDB Studio Organizations page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **functions** in the instance control bar.
+
+*Note, the **functions** page will only be available to super users.*
+
+## Manage Projects
+
+On the **functions** page of the HarperDB Studio you are presented with a functions management screen with the following properties:
+
+* **projects**
+
+  Displays a list of Custom Functions projects residing on this instance.
+* **/project_name/routes**
+
+  Only displayed if there is an existing project. Displays the routes files contained within the selected project.
+* **/project_name/helpers**
+
+  Only displayed if there is an existing project. Displays the helper files contained within the selected project.
+* **/project_name/static**
+
+  Only displayed if there is an existing project. Displays the static file count and a link to the static files contained within the selected project. Note, static files cannot currently be deployed through the Studio and must be deployed via the [HarperDB API](https://api.harperdb.io/) or manually to the server (not applicable with HarperDB Cloud).
+* **Root File Directory**
+
+  Displays the root file directory where the Custom Functions projects reside on this instance.
+* **Custom Functions Server URL**
+
+  Displays the base URL at which all Custom Functions are accessed for this instance.
+
+
+## Create a Project
+
+HarperDB Custom Functions projects can be initialized with the following instructions.
+
+1) Click the plus icon next to the **projects** heading. If this is your first project, you can skip this step.
+
+2) Enter the project name in the text box located under the **projects** heading.
+
+3) Click the check mark icon to confirm the project name.
+
+4) The Studio will take a few moments to provision a new project based on the [Custom Functions template](https://github.com/HarperDB/harperdb-custom-functions-template).
+
+5) The Custom Functions project is now created and ready to modify.
+
+## Modify a Project
+
+Custom Functions routes and helper functions can be modified directly through the Studio. From the **functions** page:
+
+1) Select the appropriate **project**.
+
+2) Select the appropriate **route** or **helper**.
+
+3) Modify the code with your desired changes.
+
+4) Click the save icon at the bottom right of the screen.
+
+   *Note, saving modifications will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.*
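+The Studio reads and writes these files through the Custom Functions operations of the HarperDB API. For example, `custom_functions_status` reports the server state along with the root directory and URL shown in the panels above (placeholder URL and credentials; treat the operation name as an assumption to verify against the 4.1 API docs):
+
+```bash
+curl -X POST https://c1-demo.harperdbcloud.com \
+  -H 'Content-Type: application/json' \
+  -u 'HDB_ADMIN:password' \
+  -d '{"operation": "custom_functions_status"}'
+```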
+## Create Additional Routes/Helpers
+
+To create an additional **route** in your Custom Functions project, from the **functions** page:
+
+1) Select the appropriate Custom Functions **project**.
+
+2) Click the plus icon to the right of the **routes** header.
+
+3) Enter the name of the new route in the textbox that appears.
+
+4) Click the check icon to create the new route.
+
+   *Note, adding a route will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.*
+
+To create an additional **helper** in your Custom Functions project, from the **functions** page:
+
+1) Select the appropriate Custom Functions **project**.
+
+2) Click the plus icon to the right of the **helpers** header.
+
+3) Enter the name of the new helper in the textbox that appears.
+
+4) Click the check icon to create the new helper.
+
+   *Note, adding a helper will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.*
+
+## Delete a Project/Route/Helper
+
+To delete a Custom Functions project from the **functions** page:
+
+1) Click the minus icon to the right of the **projects** header.
+
+2) Click the red minus icon to the right of the Custom Functions project you would like to delete.
+
+3) Confirm deletion by clicking the red check icon.
+
+   *Note, deleting a project will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.*
+
+To delete a Custom Functions _project route_ from the **functions** page:
+
+1) Select the appropriate Custom Functions **project**.
+
+2) Click the minus icon to the right of the **routes** header.
+
+3) Click the red minus icon to the right of the Custom Functions route you would like to delete.
+
+4) Confirm deletion by clicking the red check icon.
+
+   *Note, deleting a route will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.*
+
+To delete a Custom Functions _project helper_ from the **functions** page:
+
+1) Select the appropriate Custom Functions **project**.
+
+2) Click the minus icon to the right of the **helpers** header.
+
+3) Click the red minus icon to the right of the Custom Functions helper you would like to delete.
+
+4) Confirm deletion by clicking the red check icon.
+
+   *Note, deleting a helper will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.*
+
+## Deploy Custom Functions Project to Other Instances
+
+The HarperDB Studio provides the ability to deploy Custom Functions projects to additional HarperDB instances within the same Studio Organization. To deploy Custom Functions projects to additional instances, starting from the **functions** page:
+
+1) Select the **project** you would like to deploy.
+
+2) Click the **deploy** button at the top right.
+
+3) A list of instances (excluding the current instance) within the organization will be displayed in tabular format with the following information:
+
+   * **Instance Name**: The name used to describe the instance.
+
+   * **Instance URL**: The URL used to access the instance.
+
+   * **CF Capable**: Describes if the instance version supports Custom Functions (yes/no).
+
+   * **CF Enabled**: Describes if Custom Functions are configured and enabled on the instance (yes/no).
+
+   * **Has Project**: Describes if the selected Custom Functions project has been previously deployed to the instance (yes/no).
+
+   * **Deploy**: Button used to deploy the project to the instance.
+
+   * **Remove**: Button used to remove the project from the instance. *Note, this will only be visible if the project has been previously deployed to the instance.*
+
+4) In the appropriate instance row, click the **deploy** button.
+
+   *Note, deploying a project will restart the Custom Functions server on the HarperDB instance receiving the deployment and may result in up to 60 seconds of downtime for all Custom Functions.*
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/manage-instance-roles.md b/site/versioned_docs/version-4.1/harperdb-studio/manage-instance-roles.md
new file mode 100644
index 00000000..e301e7d8
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/manage-instance-roles.md
@@ -0,0 +1,76 @@
+---
+title: Manage Instance Roles
+---
+
+# Manage Instance Roles
+
+HarperDB roles can be managed directly through the HarperDB Studio. It is recommended to read through the users & roles documentation to gain a strong understanding of how they operate.
+
+
+
+Instance role configuration is handled through the roles page of the HarperDB Studio, accessed with the following instructions:
+
+1) Navigate to the HarperDB Studio Organizations page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **roles** in the instance control bar.
+
+*Note, the **roles** page will only be available to super users.*
+
+
+
+The *roles management* screen consists of the following panels:
+
+* **super users**
+
+  Displays all super user roles for this instance.
+* **cluster users**
+
+  Displays all cluster user roles for this instance.
+* **standard roles**
+
+  Displays all standard roles for this instance.
+* **role permission editing**
+
+  Once a role is selected for editing, permissions will be displayed here in JSON format.
+
+*Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`.*
+
+## Role Management
+
+#### Create a Role
+
+1) Click the plus icon at the top right of the appropriate role section.
+
+2) Enter the role name.
+
+3) Click the green check mark.
+
+4) Configure the role permissions in the role permission editing panel.
+
+   *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.*
+
+5) Click **Update Role Permissions**.
+
+#### Modify a Role
+
+1) Click the appropriate role from the appropriate role section.
+
+2) Modify the role permissions in the role permission editing panel.
+
+   *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.*
+
+3) Click **Update Role Permissions**.
+
+#### Delete a Role
+
+Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it.
+
+1) Click the minus icon at the top right of the appropriate role section.
+
+2) Identify the appropriate role to delete and click the red minus sign in the same row.
+
+3) Click the red check mark to confirm deletion.
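+The permission JSON edited on this page follows the standard HarperDB role format: per-schema, per-table flags for read, insert, update, and delete, plus optional attribute-level permissions. As a sketch, an equivalent Operations API call to create a standard role (the role, schema, and table names here are hypothetical):
+
+```bash
+curl -X POST https://c1-demo.harperdbcloud.com \
+  -H 'Content-Type: application/json' \
+  -u 'HDB_ADMIN:password' \
+  -d '{
+    "operation": "add_role",
+    "role": "developer",
+    "permission": {
+      "super_user": false,
+      "dev": {
+        "tables": {
+          "dog": {
+            "read": true,
+            "insert": true,
+            "update": true,
+            "delete": false,
+            "attribute_permissions": []
+          }
+        }
+      }
+    }
+  }'
```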
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/manage-instance-users.md b/site/versioned_docs/version-4.1/harperdb-studio/manage-instance-users.md
new file mode 100644
index 00000000..4871cf88
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/manage-instance-users.md
@@ -0,0 +1,63 @@
+---
+title: Manage Instance Users
+---
+
+# Manage Instance Users
+
+HarperDB instance users can be managed directly through the HarperDB Studio. It is recommended to read through the users & roles documentation first to gain a strong understanding of how users and roles operate.
+
+
+
+Instance user configuration is handled through the **users** page of the HarperDB Studio, accessed with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+
+2) Click the appropriate organization that the instance belongs to.
+
+3) Select your desired instance.
+
+4) Click **users** in the instance control bar.
+
+*Note, the **users** page will only be available to super users.*
+
+## Add a User

+HarperDB instance users can be added with the following instructions.
+
+1) In the **add user** panel on the left enter:
+
+   * New user username.
+
+   * New user password.
+
+   * Select a role.
+
+     *Learn more about role management here: [Manage Instance Roles](./manage-instance-roles).*
+
+2) Click **Add User**.
+
+## Edit a User
+
+HarperDB instance users can be modified with the following instructions.
+
+1) In the **existing users** panel, click the row of the user you would like to edit.
+
+2) To change a user's password:
+
+   1) In the **Change user password** section, enter the new password.
+
+   2) Click **Update Password**.
+
+3) To change a user's role:
+
+   1) In the **Change user role** section, select the new role.
+
+   2) Click **Update Role**.
+
+4) To delete a user:
+
+   1) In the **Delete User** section, type the username into the textbox.
+
+      *This is done for confirmation purposes.*
+
+   2) Click **Delete User**.
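+For scripted user management, the same add/edit/delete actions map to Operations API calls such as `add_user`; a sketch with hypothetical values (the role field takes the role's id):
+
+```bash
+curl -X POST https://c1-demo.harperdbcloud.com \
+  -H 'Content-Type: application/json' \
+  -u 'HDB_ADMIN:password' \
+  -d '{
+    "operation": "add_user",
+    "role": "developer_role_id",
+    "username": "jane",
+    "password": "S3curePassw0rd!",
+    "active": true
+  }'
```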
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/manage-schemas-browse-data.md b/site/versioned_docs/version-4.1/harperdb-studio/manage-schemas-browse-data.md
new file mode 100644
index 00000000..41493b96
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/manage-schemas-browse-data.md
@@ -0,0 +1,132 @@
+---
+title: Manage Schemas / Browse Data
+---
+
+# Manage Schemas / Browse Data
+
+Manage instance schemas/tables and browse data in tabular format with the following instructions:
+
+1) Navigate to the HarperDB Studio Organizations page.
+2) Click the appropriate organization that the instance belongs to.
+3) Select your desired instance.
+4) Click **browse** in the instance control bar.
+
+Once on the instance browse page you can view data, manage schemas and tables, add new data, and more.
+
+## Manage Schemas and Tables
+
+#### Create a Schema
+
+1) Click the plus icon at the top right of the schemas section.
+2) Enter the schema name.
+3) Click the green check mark.
+
+
+#### Delete a Schema
+
+Deleting a schema is permanent and irreversible. Deleting a schema removes all tables and data within it.
+
+1) Click the minus icon at the top right of the schemas section.
+2) Identify the appropriate schema to delete and click the red minus sign in the same row.
+3) Click the red check mark to confirm deletion.
+
+
+#### Create a Table
+
+1) Select the desired schema from the schemas section.
+2) Click the plus icon at the top right of the tables section.
+3) Enter the table name.
+4) Enter the primary key.
+
+   *The primary key is also often referred to as the hash attribute in the Studio, and it defines the unique identifier for each row in your table.*
+5) Click the green check mark.
+
+
+#### Delete a Table
+Deleting a table is permanent and irreversible. Deleting a table removes all data within it.
+
+1) Select the desired schema from the schemas section.
+2) Click the minus icon at the top right of the tables section.
+3) Identify the appropriate table to delete and click the red minus sign in the same row.
+4) Click the red check mark to confirm deletion.
+
+## Manage Table Data
+
+The following section assumes you have selected the appropriate table from the schema/table browser.
+
+
+
+#### Filter Table Data
+
+1) Click the magnifying glass icon at the top right of the table browser.
+2) The search filters will expand; enter your desired filter values.
+3) The results will be filtered appropriately.
+
+
+#### Load CSV Data
+
+1) Click the data icon at the top right of the table browser. You will be directed to the CSV upload page, where you can choose to import a CSV by URL or upload a CSV file.
+2) To import a CSV by URL:
+   1) Enter the URL in the **CSV file URL** textbox.
+   2) Click **Import From URL**.
+   3) The CSV will load, and you will be redirected back to browse table data.
+3) To upload a CSV file:
+   1) Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser).
+   2) Navigate to your desired CSV file and select it.
+   3) Click **Insert X Records**, where X is the number of records in your CSV.
+   4) The CSV will load, and you will be redirected back to browse table data.
+
+
+#### Add a Record
+
+1) Click the plus icon at the top right of the table browser.
+2) The Studio will pre-populate existing table attributes in JSON format.
+
+   *The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction.*
+3) Enter values to be added to the record.
+
+   *You may add new attributes to the JSON; they will be automatically added to the table.*
+4) Click the **Add New** button.
+
+
+#### Edit a Record
+
+1) Click the record/row you would like to edit.
+2) Modify the desired values.
+
+   *You may add new attributes to the JSON; they will be automatically added to the table.*
+
+3) Click the **save icon**.
+
+
+#### Delete a Record
+
+Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded, as well as the data that was deleted.
+
+1) Click the record/row you would like to delete.
+2) Click the **delete icon**.
+3) Confirm deletion by clicking the **check icon**.
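+The same adds and edits can be issued directly against the Operations API; for example, a bulk insert using the dog demo data (the URL and credentials are placeholders):
+
+```bash
+curl -X POST https://c1-demo.harperdbcloud.com \
+  -H 'Content-Type: application/json' \
+  -u 'HDB_ADMIN:password' \
+  -d '{
+    "operation": "insert",
+    "schema": "dev",
+    "table": "dog",
+    "records": [
+      {"id": 1, "dog_name": "Penny", "owner_name": "Kyle", "age": 7},
+      {"id": 2, "dog_name": "Harper", "owner_name": "Stephen", "age": 5}
+    ]
+  }'
```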
+## Browse Table Data
+
+The following section assumes you have selected the appropriate table from the schema/table browser.
+
+#### Browse Table Data
+
+The first page of table data is automatically loaded on table selection. Paging controls are at the bottom of the table. Here you can:
+
+* Page left and right using the arrows.
+* Type in the desired page.
+* Change the page size (the amount of records displayed in the table).
+
+
+#### Refresh Table Data
+
+Click the refresh icon at the top right of the table browser.
+
+
+
+#### Automatically Refresh Table Data
+
+Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data.
+
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/organizations.md b/site/versioned_docs/version-4.1/harperdb-studio/organizations.md
new file mode 100644
index 00000000..f9d5cb50
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/organizations.md
@@ -0,0 +1,105 @@
+---
+title: Organizations
+---
+
+# Organizations
+HarperDB Studio organizations provide the ability to group HarperDB Cloud Instances. Organization behavior is as follows:
+
+* Billing occurs at the organization level to a single credit card.
+* Organizations retain their own unique HarperDB Cloud subdomain.
+* Cloud instances reside within an organization.
+* Studio users can be invited to organizations to share instances.
+
+
+An organization is automatically created for you when you sign up for HarperDB Studio. If you only have one organization, the Studio will automatically bring you to your organization's page.
+
+---
+
+## List Organizations
+A summary view of all organizations your user belongs to can be viewed on the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the HarperDB Studio.
+
+## Create a New Organization
+A new organization can be created as follows:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2) Click the **Create a New Organization** card.
+3) Fill out the new organization details:
+   * Enter Organization Name
+     *This is used for descriptive purposes only.*
+   * Enter Organization Subdomain
+     *Part of the URL that will be used to identify your HarperDB Cloud Instances. For example, with subdomain "demo" and instance name "c1" the instance URL would be: https://c1-demo.harperdbcloud.com.*
+4) Click **Create Organization**.
+
+## Delete an Organization
+An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows:
+
+1) Navigate to the HarperDB Studio Organizations page.
+2) Identify the proper organization card and click the trash can icon.
+3) Enter the organization name into the text box.
+
+   *This is done for confirmation purposes to ensure you do not accidentally delete an organization.*
+4) Click the **Do It** button.
+
+## Manage Users
+HarperDB Studio organization owners can manage users, including inviting new users, removing users, and toggling ownership.
+
+
+
+#### Inviting a User
+A new user can be invited to an organization as follows:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2) Click the appropriate organization card.
+3) Click **users** at the top of the screen.
+4) In the **add user** box, enter the new user's email address.
+5) Click **Add User**.
+
+Users may or may not already be HarperDB Studio users when you add them to an organization. If the HarperDB Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a HarperDB Studio account, they will receive an email welcoming them to HarperDB Studio.
+
+---
+
+#### Toggle a User's Organization Owner Status
+Organization owners have full access to the organization, including the ability to manage organization users; create, modify, and delete instances; and delete the organization.
Users must have accepted their invitation prior to being promoted to an owner. A user's organization owner status can be toggled as follows:
+
+1) Navigate to the HarperDB Studio Organizations page.
+2) Click the appropriate organization card.
+3) Click **users** at the top of the screen.
+4) Click the appropriate user from the **existing users** section.
+5) Toggle the **Is Owner** switch to the desired status.
+---
+
+#### Remove a User from an Organization
+Users may be removed from an organization at any time. Removing a user from an organization will not delete their HarperDB Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2) Click the appropriate organization card.
+3) Click **users** at the top of the screen.
+4) Click the appropriate user from the **existing users** section.
+5) Type **DELETE** in the text box in the **Delete User** row.
+
+   *This is done for confirmation purposes to ensure you do not accidentally delete a user.*
+6) Click **Delete User**.
+
+## Manage Billing
+
+Billing is configured per organization and will be billed to the stored credit card at appropriate intervals (monthly or annually, depending on the registered instance). Billing settings can be configured as follows:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2) Click the appropriate organization card.
+3) Click **billing** at the top of the screen.
+
+Here organization owners can view invoices, manage coupons, and manage the associated credit card.
+
+
+
+*HarperDB billing and payments are managed via Stripe.*
+
+
+
+### Add a Coupon
+
+Coupons are applicable towards any paid tier or user-installed instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows:
+
+1) In the coupons panel of the **billing** page, enter your coupon code.
+2) Click **Add Coupon**.
+3) The coupon will then be available and displayed in the coupons panel.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/harperdb-studio/query-instance-data.md b/site/versioned_docs/version-4.1/harperdb-studio/query-instance-data.md
new file mode 100644
index 00000000..5c3ae28f
--- /dev/null
+++ b/site/versioned_docs/version-4.1/harperdb-studio/query-instance-data.md
@@ -0,0 +1,53 @@
+---
+title: Query Instance Data
+---
+
+# Query Instance Data
+
+SQL queries can be executed directly through the HarperDB Studio with the following instructions:
+
+1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization that the instance belongs to.
+3) Select your desired instance.
+4) Click **query** in the instance control bar.
+5) Enter your SQL query in the SQL query window.
+6) Click **Execute**.
+
+*Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser.*
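+The Studio submits your SQL to the instance's Operations API `sql` operation; the equivalent direct call looks like this (the URL and credentials are placeholders, reusing the dog demo query from the Charts docs):
+
+```bash
+curl -X POST https://c1-demo.harperdbcloud.com \
+  -H 'Content-Type: application/json' \
+  -u 'HDB_ADMIN:password' \
+  -d '{"operation": "sql", "sql": "SELECT AVG(age) AS avg_age, owner_name FROM dev.dog GROUP BY owner_name"}'
```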
+ +#### Refresh Results Set + +Click the refresh icon at the top right of the results set table. + +#### Automatically Refresh Results Set + +Toggle the auto switch at the top right of the results set table. The results set will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. + +## Query History + +Query history is stored in your local browser cache. Executed queries are listed with the most recent at the top in the **query history** section. + + +#### Rerun Previous Query + +* Identify the query from the **query history** list. +* Click the appropriate query. It will be loaded into the **sql query** input box. +* Click **Execute**. + +#### Clear Query History + +Click the trash can icon at the top right of the **query history** section. + +## Create Charts + +The HarperDB Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/harperdb-studio/resources.md b/site/versioned_docs/version-4.1/harperdb-studio/resources.md new file mode 100644 index 00000000..3eaf0a4a --- /dev/null +++ b/site/versioned_docs/version-4.1/harperdb-studio/resources.md @@ -0,0 +1,43 @@ +--- +title: Resources (Marketplace, Drivers, Tutorials, & Example Code) +--- + +# Resources (Marketplace, Drivers, Tutorials, & Example Code) + +HarperDB Studio resources are available regardless of whether or not you are logged in. + +## HarperDB Marketplace + +The [HarperDB Marketplace](https://studio.harperdb.io/resources/marketplace/active) is a collection of SDKs and connectors that enable developers to expand upon HarperDB for quick and easy solution development. Extensions are built and supported by the HarperDB Community. Each extension is hosted on the appropriate package manager or host. + + + +To download a Marketplace extension: + +1) Navigate to the [HarperDB Marketplace](https://studio.harperdb.io/resources/marketplace/active) page. +2) Identify the extension you would like to use. +3) Click the link to the package. +4) Follow the extension’s instructions to proceed. + +You can submit your rating for each extension by clicking on the stars. + +## HarperDB Drivers + +HarperDB offers standard drivers to connect real-time HarperDB data with BI, analytics, reporting, and data visualization technologies. Drivers are built and maintained by [CData Software](https://www.cdata.com/drivers/harperdb/). + + + +To download a driver: + +1) Navigate to the [HarperDB Drivers](https://studio.harperdb.io/resources/marketplace/active) page. +2) Identify the driver you would like to use. +3) Click the download link. +4) For additional instructions, visit the support link on the driver card. + +## Video Tutorials + +HarperDB offers video tutorials available in the Studio on the [HarperDB Tutorials](https://studio.harperdb.io/resources/tutorials/UExsZ1RNVEtzeXBTNUdJbjRZaTNOeEM0aW5YX3RBNU85SS4yODlGNEE0NkRGMEEzMEQy) page as well as our [YouTube channel](https://www.youtube.com/playlist?list=PLlgTMTKsypS5GIn4Yi3NxC4inX_tA5O9I). The HarperDB Studio is changing all the time; as a result, the videos may not include all of the current Studio features. + +## Example Code + +The [code examples](https://studio.harperdb.io/resources/examples/QuickStart%20Examples/Create%20dev%20Schema) page offers example code for many different programming languages. These samples will include a placeholder for your authorization token.
Full code examples with the authorization token prepopulated are available within individual instance pages. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/index.md b/site/versioned_docs/version-4.1/index.md new file mode 100644 index 00000000..fe8e0795 --- /dev/null +++ b/site/versioned_docs/version-4.1/index.md @@ -0,0 +1,17 @@ +--- +title: Documentation +--- + +# Documentation + +HarperDB's documentation covers installation, getting started, APIs, security, and much more. Browse the topics at left, or choose one of the commonly used documentation sections below. + +*** + +* [Install HarperDB Locally](./install-harperdb/) +* [Getting Started](./getting-started/) +* [HarperDB Operations API](https://api.harperdb.io) +* [HarperDB Studio](./harperdb-studio/) +* [HarperDB Cloud](./harperdb-cloud/) +* [Developer Project Examples](https://github.com/search?q=harperdb) +* [Support](./support) diff --git a/site/versioned_docs/version-4.1/install-harperdb/index.md b/site/versioned_docs/version-4.1/install-harperdb/index.md new file mode 100644 index 00000000..72c05115 --- /dev/null +++ b/site/versioned_docs/version-4.1/install-harperdb/index.md @@ -0,0 +1,61 @@ +--- +title: Install HarperDB +--- + +# Install HarperDB + +This documentation contains information for installing HarperDB locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with HarperDB Cloud](https://studio.harperdb.io/sign-up). HarperDB is a cross-platform database; we recommend Linux for production use, but HarperDB can run on Windows and Mac as well, for development purposes. Installation is usually very simple and just takes a few steps, but there are a few different options documented here. + +HarperDB runs on Node.js, so if you do not have it installed, you need to do that first (if you already have it installed, you can skip to installing HarperDB itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm), but generally NVM can be installed with: +```bash +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash +``` +Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but we support all currently maintained Node versions (currently version 14 and newer; make sure to always use the latest minor/patch release for your major version): + +```bash +nvm install 18 +``` + +### Install and Start HarperDB +Then you can install HarperDB with NPM and start it: + +```bash +npm install -g harperdb +harperdb +``` + +HarperDB will automatically start after installation. + +If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system for use as a database server in our linux installation guide](./linux). + + + +# With Docker + +If you would like to run HarperDB in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server. + +Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a HarperDB container.
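As a hedged sketch of what that typically looks like (the Docker Hub page is the authoritative reference; the host volume path, credentials, and container data path below are placeholder assumptions, not confirmed by this guide):

```bash
# Illustrative only: run HarperDB in a container, persisting data to a
# host directory and exposing the operations API port.
docker run -d \
  -v /host/path/hdb:/home/harperdb/hdb \
  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
  -e HDB_ADMIN_PASSWORD=password \
  -p 9925:9925 \
  harperdb/harperdb
```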
+ +# Offline Install + +If you need to install HarperDB on a device that doesn't have an Internet connection, you can choose your version and download the npm package and install it directly (you’ll still need Node.js and NPM): + +Download Install Package + + +Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it: + +```bash +npm install -g harperdb-X.X.X.tgz +harperdb install +``` + +For more information, visit the [HarperDB Command Line Interface](../harperdb-cli) guide. + + +# Installation on Less Common Platforms + +HarperDB comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including: +* [Go](https://go.dev/dl/): version 1.19.1 +* GCC +* Make +* Python v3.7, v3.8, v3.9, or v3.10 diff --git a/site/versioned_docs/version-4.1/install-harperdb/linux.md b/site/versioned_docs/version-4.1/install-harperdb/linux.md new file mode 100644 index 00000000..8435985c --- /dev/null +++ b/site/versioned_docs/version-4.1/install-harperdb/linux.md @@ -0,0 +1,208 @@ +--- +title: Linux Installation and Configuration +--- + +# Linux Installation and Configuration + +If you wish to install locally or already have a configured server, see the basic [Installation Guide](./). + +The following is a recommended way to configure Linux and install HarperDB. These instructions should work reasonably well for any public cloud or on-premises Linux instance. + +--- + +These instructions assume that the following has already been completed: + +1. Linux is installed +1. Basic networking is configured +1. A non-root user account dedicated to HarperDB with sudo privileges exists +1. An additional volume for storing HarperDB files is attached to the Linux instance +1. Traffic to ports 9925 (HarperDB Operations API), 9926 (HarperDB Custom Functions), and 9932 (HarperDB Clustering) is permitted + +For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 Instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account. + +--- + +### (Optional) LVM Configuration +Logical Volume Manager (LVM) can be used to stripe multiple disks together to form a single logical volume. If striping disks together is not a requirement, skip these steps.
+ +Find the disk that already has a partition + +```bash +used_disk=$(lsblk -P -I 259 | grep "nvme.n1.*part" | grep -o "nvme.n1") +``` + +Create an array of free disks + +```bash +declare -a free_disks +mapfile -t free_disks < <(lsblk -P -I 259 | grep "nvme.n1.*disk" | grep -o "nvme.n1" | grep -v "$used_disk") +``` + +Get the quantity of free disks + +```bash +free_disks_qty=${#free_disks[@]} +``` + +Construct the pvcreate command + +```bash +cmd_string="" +for i in "${free_disks[@]}" +do +cmd_string="$cmd_string /dev/$i" +done +``` + +Initialize disks for use by LVM + +```bash +pvcreate_cmd="pvcreate $cmd_string" +sudo $pvcreate_cmd +``` + +Create volume group + +```bash +vgcreate_cmd="vgcreate hdb_vg $cmd_string" +sudo $vgcreate_cmd +``` + +Create logical volume + +```bash +sudo lvcreate -n hdb_lv -i $free_disks_qty -l 100%FREE hdb_vg +``` + +### Configure Data Volume + +Run `lsblk` and note the device name of the additional volume + +```bash +lsblk +``` + +Create an ext4 filesystem on the volume (The below commands assume the device name is nvme1n1. If you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb_vg/hdb_lv) + +```bash +sudo mkfs.ext4 -L hdb_data /dev/nvme1n1 +``` + +Mount the file system and set the correct permissions for the directory + +```bash +mkdir /home/ubuntu/hdb +sudo mount -t ext4 /dev/nvme1n1 /home/ubuntu/hdb +sudo chown -R ubuntu:ubuntu /home/ubuntu/hdb +sudo chmod 775 /home/ubuntu/hdb +``` + +Create an fstab entry to mount the filesystem on boot + +```bash +echo "LABEL=hdb_data /home/ubuntu/hdb ext4 defaults,noatime 0 1" | sudo tee -a /etc/fstab +``` + +### Configure Linux and Install Prerequisites +If a swap file or partition does not already exist, create and enable a 2GB swap file + +```bash +sudo dd if=/dev/zero of=/swapfile bs=128M count=16 +sudo chmod 600 /swapfile +sudo mkswap /swapfile +sudo swapon /swapfile +echo "/swapfile swap swap defaults 0 0" | sudo tee -a /etc/fstab +``` + +Increase the open file limits for the ubuntu user + +```bash +echo "ubuntu soft nofile 500000" | sudo tee -a /etc/security/limits.conf +echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf +``` + +Install Node Version Manager (nvm) + +```bash +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash +``` + +Load nvm (or log out and then log back in) + +```bash +. ~/.nvm/nvm.sh +``` + +Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites)) + +```bash +nvm install +``` + +### Install and Start HarperDB +Here is an example of installing HarperDB with minimal configuration. + +```bash +npm install -g harperdb +harperdb start \ + --TC_AGREEMENT "yes" \ + --ROOTPATH "/home/ubuntu/hdb" \ + --OPERATIONSAPI_NETWORK_PORT "9925" \ + --HDB_ADMIN_USERNAME "HDB_ADMIN" \ + --HDB_ADMIN_PASSWORD "password" +``` + +Here is an example of installing HarperDB with commonly used additional configuration. + +```bash +npm install -g harperdb +harperdb start \ + --TC_AGREEMENT "yes" \ + --ROOTPATH "/home/ubuntu/hdb" \ + --OPERATIONSAPI_NETWORK_PORT "9925" \ + --HDB_ADMIN_USERNAME "HDB_ADMIN" \ + --HDB_ADMIN_PASSWORD "password" \ + --OPERATIONSAPI_NETWORK_HTTPS "true" \ + --CUSTOMFUNCTIONS_NETWORK_HTTPS "true" \ + --CLUSTERING_ENABLED "true" \ + --CLUSTERING_USER "cluster_user" \ + --CLUSTERING_PASSWORD "password" \ + --CLUSTERING_NODENAME "hdb1" +``` + +HarperDB will automatically start after installation.
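Once it is running, you can optionally sanity-check the instance by calling the operations API; a minimal sketch assuming the minimal-configuration install above (HTTP on port 9925, with the example credentials) and the `system_information` operation:

```bash
# Verify the instance responds; uses the HDB_ADMIN credentials set during install.
curl -X POST http://localhost:9925 \
  -u HDB_ADMIN:password \
  -H 'Content-Type: application/json' \
  -d '{"operation": "system_information"}'
```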
If you wish HarperDB to start when the OS boots, you have two options: + +You can set up a crontab: + +```bash +(crontab -l 2>/dev/null; echo "@reboot PATH=\"/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH\" && harperdb start") | crontab - +``` + +Or you can create a systemd script at `/etc/systemd/system/harperdb.service`, pasting the following contents into the file: + +``` +[Unit] +Description=HarperDB + +[Service] +Type=simple +Restart=always +User=ubuntu +Group=ubuntu +WorkingDirectory=/home/ubuntu +ExecStart=/bin/bash -c 'PATH="/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH"; harperdb' + +[Install] +WantedBy=multi-user.target +``` + +And then run the following: + +``` +systemctl daemon-reload +systemctl enable harperdb +``` + +For more information, visit the [HarperDB Command Line Interface guide](../harperdb-cli) and the [HarperDB Configuration File guide](../configuration). diff --git a/site/versioned_docs/version-4.1/jobs.md b/site/versioned_docs/version-4.1/jobs.md new file mode 100644 index 00000000..e91330c7 --- /dev/null +++ b/site/versioned_docs/version-4.1/jobs.md @@ -0,0 +1,112 @@ +--- +title: Asynchronous Jobs +--- + +# Asynchronous Jobs + +HarperDB Jobs are asynchronous tasks performed by the Operations API. + +## Job Summary + +Jobs use an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so that job is started and the id is provided to check on the status. + +The job status can be **COMPLETE** or **IN_PROGRESS**. + +## Example Job Operations + +Example job operations include: + +[csv data load](https://api.harperdb.io/#0186bc25-b9ae-44e7-bd9e-8edc0f289aa2) + +[csv file load](https://api.harperdb.io/#c4b71011-8a1d-4cb2-8678-31c0363fea5e) + +[csv url load](https://api.harperdb.io/#d1e9f433-e250-49db-b44d-9ce2dcd92d32) + +[import from s3](https://api.harperdb.io/#820b3947-acbe-41f9-858b-2413cabc3a18) + +[delete_records_before](https://api.harperdb.io/#8de87e47-73a8-4298-b858-ca75dc5765c2) + +[export_local](https://api.harperdb.io/#49a02517-ada9-4198-b48d-8707db905be0) + +[export_to_s3](https://api.harperdb.io/#f6393e9f-e272-4180-a42c-ff029d93ddd4) + +Example Response from a Job Operation + +``` +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` + +Whenever one of these operations is initiated, an asynchronous job is created and the response contains the id of that job, which can be used to check on its status. + +## Managing Jobs + +To check on a job's status, use the [get_job](https://api.harperdb.io/#d501bef7-dbb7-4714-b535-e466f6583dce) operation. + +Get Job Request + +``` +{ + "operation": "get_job", + "id": "4a982782-929a-4507-8794-26dae1132def" +} +``` + +Get Job Response + +``` +[ + { + "__createdtime__": 1611615798782, + "__updatedtime__": 1611615801207, + "created_datetime": 1611615798774, + "end_datetime": 1611615801206, + "id": "4a982782-929a-4507-8794-26dae1132def", + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "start_datetime": 1611615798805, + "status": "COMPLETE", + "type": "csv_url_load", + "user": "HDB_ADMIN", + "start_datetime_converted": "2021-01-25T23:03:18.805Z", + "end_datetime_converted": "2021-01-25T23:03:21.206Z" + } +] +``` + +## Finding Jobs + +To find jobs (if the id is not known), use the [search_jobs_by_start_date](https://api.harperdb.io/#4474ca16-e4c2-4740-81b5-14ed98c5eeab) operation.
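Like all operations, this is a plain HTTP POST to the operations API; here is a minimal sketch, assuming a local instance and example credentials, whose body matches the Search Jobs Request shown below:

```bash
# Hedged example: find jobs that started within a given time window.
curl -X POST http://localhost:9925 \
  -u HDB_ADMIN:password \
  -H 'Content-Type: application/json' \
  -d '{
    "operation": "search_jobs_by_start_date",
    "from_date": "2021-01-25T22:05:27.464+0000",
    "to_date": "2021-01-25T23:05:27.464+0000"
  }'
```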
+ +Search Jobs Request + +``` +{ + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" +} +``` + +Search Jobs Response + +``` +[ + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } +] +``` diff --git a/site/versioned_docs/version-4.1/logging.md b/site/versioned_docs/version-4.1/logging.md new file mode 100644 index 00000000..06d2eadc --- /dev/null +++ b/site/versioned_docs/version-4.1/logging.md @@ -0,0 +1,67 @@ +--- +title: Logging +--- + +# Logging + +HarperDB maintains a log of events that take place throughout operation. Log messages can be used for diagnostics purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory `/log/hdb.log`. The install log is located in the HarperDB application directory, most likely in your npm directory `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: +``` +<timestamp> [<level>] [<thread/id>] [<tag>]...: <message> +``` +For example, a typical log entry looks like: +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started. +``` +The components of a log entry are: +* timestamp - This is the date/time stamp when the event occurred +* level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels, in order from least urgent (and most verbose), are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +* thread/id - This reports the name and id of the thread that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are: + * main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads + * http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + * Clustering - These are threads and processes that handle replication. + * job - These are job threads that have been started to handle operations that are executed in a separate job thread. +* tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +* message - This is the main message that was reported. + +We try to keep logging to a minimum by default; to do this, the default log level is `error`. If you require more information from the logs, lowering the log level (for example, to `debug` or `trace`) will provide that. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +HarperDB clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of HarperDB, +and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances.
Due to the verbosity of these servers, there is a separate +log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels, from least verbose, are +`error`, `warn`, `info`, `debug`, and `trace`. + +## Log File vs Standard Streams + +HarperDB logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. +To log to standard streams effectively, make sure to directly run `harperdb` and don't start it as a separate process (don't use `harperdb start`), and `logging.stdStreams` must be set to true. Note that logging to standard streams only will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This will allow for organized storage and efficient use of disk space. For more information, see “logging” in our [config docs](./configuration). + +## Read Logs via the API + +To access specific logs, you may query the HarperDB API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria. + +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` + + + diff --git a/site/versioned_docs/version-4.1/reference/content-types.md b/site/versioned_docs/version-4.1/reference/content-types.md new file mode 100644 index 00000000..c8a1bad8 --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/content-types.md @@ -0,0 +1,25 @@ +--- +title: HarperDB Supported Content Types +--- + +# HarperDB Supported Content Types + +HarperDB supports several different content types (or MIME types), both for HTTP request bodies (describing operations) and for serializing content into HTTP response bodies. HarperDB follows HTTP standards for specifying both request body content types and acceptable response body content types. Any of these content types can be used with any of the standard HarperDB operations. + +For request body content, the content type should be specified with the `Content-Type` header. For example, with JSON, use `Content-Type: application/json`, and for CBOR, include `Content-Type: application/cbor`. To request that the response body be encoded with a specific content type, use the `Accept` header. If you want the response to be in JSON, use `Accept: application/json`. If you want the response to be in CBOR, use `Accept: application/cbor`. + +The following content types are supported: + +## JSON - application/json +JSON is the most widely used content type, and is relatively readable and easy to work with. However, JSON does not support all the data types that are supported by HarperDB, and can't be used to natively encode data types like binary data or explicit Maps/Sets. Also, JSON is not as efficient as binary formats. When using JSON, compression is recommended (this also follows standard HTTP protocol with the `Accept-Encoding` header) to improve network transfer performance (although there is server performance overhead).
JSON is a good choice for web development when standard JSON types are sufficient, when combined with compression, and when debuggability/observability is important. + +## CBOR - application/cbor +CBOR is a highly efficient binary format, and is a recommended format for most production use cases with HarperDB. CBOR supports the full range of HarperDB data types, including binary data, typed dates, and explicit Maps/Sets. CBOR is very performant and space efficient even without compression. Compression will still yield better network transfer size/performance, but compressed CBOR is generally not any smaller than compressed JSON. CBOR also natively supports streaming for optimal performance (using indefinite length arrays). The CBOR format has excellent standardization and HarperDB's CBOR provides an excellent balance of performance and size efficiency. + +## MessagePack - application/x-msgpack +MessagePack is another efficient binary format like CBOR, with support for all HarperDB data types. MessagePack generally has wider adoption than CBOR and can be useful in systems that don't have CBOR support (or good support). However, MessagePack does not have native support for streaming of arrays of data (for query results), and so query results are returned as a (concatenated) sequence of MessagePack objects/maps. MessagePack decoders used with HarperDB's MessagePack must be prepared to decode a direct sequence of MessagePack values to properly read responses. + +## Comma-separated Values (CSV) - text/csv +Comma-separated values is an easy format to use and understand that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure and most data types, and shouldn't be used for frequent/production use, but when you need it, it is available. + + diff --git a/site/versioned_docs/version-4.1/reference/data-types.md b/site/versioned_docs/version-4.1/reference/data-types.md new file mode 100644 index 00000000..78a8a684 --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/data-types.md @@ -0,0 +1,37 @@ +--- +title: HarperDB Supported Data Types +--- + +# HarperDB Supported Data Types + +HarperDB supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (HarperDB supports all of JSON's data types), but JavaScript code and alternate data formats facilitate the use of additional data types. As of v4.1, HarperDB supports MessagePack and CBOR, which allow for all of HarperDB's supported data types. This includes: + +## Boolean +true or false. + +## String +Strings, or text, are a sequence of any Unicode characters and are internally encoded with UTF-8. + +## Number +Numbers can be stored as signed integers up to 64-bit or floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. JSON is parsed by JS, so the maximum safe (precise) integer is 9007199254740991 (larger numbers can be stored, but aren’t guaranteed integer precision). Custom Functions may use BigInt numbers to store/access larger 64-bit integers, but integers beyond 64-bit can’t be stored with integer precision (will be stored as standard double-precision numbers). + +## Object/Map +Objects, or maps, that hold a set of named properties can be stored in HarperDB.
When provided as JSON objects or JavaScript objects, all property keys are stored as strings. The order of properties is also preserved in HarperDB’s storage. Duplicate property keys are not allowed (they are dropped in parsing any incoming data). + +## Array +Arrays hold an ordered sequence of values and can be stored in HarperDB. There is no support for sparse arrays, although you can use objects to store data with numbers (converted to strings) as properties. + +## Null +A null value can be stored in HarperDB property values as well. + +## Date +Dates can be stored as a specific data type. This is not supported in JSON, but is supported by MessagePack and CBOR. Custom Functions can also store and use Dates using JavaScript Date instances. + +## Binary Data +Binary data can be stored in property values as well. JSON doesn’t have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in HarperDB. Custom Functions can also store binary data by using Node.js’s Buffer or Uint8Array instances to hold the binary data. + +## Explicit Map/Set +Explicit instances of JavaScript Maps and Sets can be stored and preserved in HarperDB as well. This can’t be represented with JSON, but can be with CBOR. + + + diff --git a/site/versioned_docs/version-4.1/reference/dynamic-schema.md b/site/versioned_docs/version-4.1/reference/dynamic-schema.md new file mode 100644 index 00000000..c700e42d --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/dynamic-schema.md @@ -0,0 +1,148 @@ +--- +title: Dynamic Schema +--- + +# Dynamic Schema + +HarperDB is built to make data ingestion simple. A primary driver of that is the Dynamic Schema. The purpose of this document is to provide a detailed explanation of the dynamic schema specifically related to schema definition and data ingestion. + +The dynamic schema provides the structure of schema and table namespaces while simultaneously providing the flexibility of a data-defined schema. Individual attributes are reflexively created as data is ingested, meaning the table will adapt to the structure of the data ingested. HarperDB tracks the metadata around schemas, tables, and attributes, allowing for describe table, describe schema, and describe all operations. + +### Schemas + +HarperDB schemas are analogous to a namespace that groups tables together. A schema is required to create a table. + +### Tables + +HarperDB tables group records together with a common data pattern. To create a table, users must provide a table name and a primary key. + +* **Table Name**: Used to identify the table. +* **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in HarperDB. + +**Primary Key** + +The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary key; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [HarperDB Storage Algorithm](./storage-algorithm) utilizes this value for indexing. + +**Standard Attributes** + +Additional attributes are reflexively added via insert and update operations (in both SQL and NoSQL) when new attributes are included in the data structure provided to HarperDB. As a result, schemas are additive, meaning new attributes are created in the underlying storage algorithm as additional data structures are provided.
HarperDB offers `create_attribute` and `drop_attribute` operations for users who prefer to manually define their data model independent of data ingestion. When new attributes are added to tables with existing data, the value of that new attribute will be assumed `null` for all existing records. + +**Audit Attributes** + +HarperDB automatically creates two audit attributes used on each record. + +* `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format. +* `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format. + +### Dynamic Schema Example + +To better understand the behavior, let’s take a look at an example. This example utilizes [HarperDB API operations](https://api.harperdb.io/). + +**Create a Schema** + +```json +{ + "operation": "create_schema", + "schema": "dev" +} +``` + +**Create a Table** + +Notice the schema name, table name, and hash attribute name are the only required parameters. + +```json +{ + "operation": "create_table", + "schema": "dev", + "table": "dog", + "hash_attribute": "id" +} +``` + +At this point the table does not have structure beyond what we provided, so the table looks like this: + +**dev.dog** + +![](/img/v4.1/reference/dynamic\_schema\_2\_create\_table.png.webp) + +**Insert Record** + +To define attributes we do not need to do anything beyond sending them in with an insert operation. + +```json +{ + "operation": "insert", + "schema": "dev", + "table": "dog", + "records": [ + {"id": 1, "dog_name": "Penny", "owner_name": "Kyle"} + ] +} +``` + +With a single record inserted and new attributes defined, our table now looks like this: + +**dev.dog** + +![](/img/v4.1/reference/dynamic\_schema\_3\_insert\_record.png.webp) + +Indexes have been automatically created for `dog_name` and `owner_name` attributes. + +**Insert Additional Record** + +If we continue inserting records with the same data schema, no schema updates are required. One record will omit the hash attribute from the insert to demonstrate GUID generation. + +```json +{ + "operation": "insert", + "schema": "dev", + "table": "dog", + "records": [ + {"id": 2, "dog_name": "Monk", "owner_name": "Aron"}, + {"dog_name": "Harper","owner_name": "Stephen"} + ] +} +``` + +In this case, there is no change to the schema. Our table now looks like this: + +**dev.dog** + +![](/img/v4.1/reference/dynamic\_schema\_4\_insert\_additional\_record.png.webp) + +**Update Existing Record** + +In this case, we will update a record with a new attribute not previously defined on the table. + +```json +{ + "operation": "update", + "schema": "dev", + "table": "dog", + "records": [ + {"id": 2, "weight_lbs": 35} + ] +} +``` + +Now we have a new attribute called `weight_lbs`. Our table now looks like this: + +**dev.dog** + +![](/img/v4.1/reference/dynamic\_schema\_5\_update\_existing\_record.png.webp) + +**Query Table with SQL** + +Now if we query for all records where `weight_lbs` is `null` we expect to get back two records. + +```json +{ + "operation": "sql", + "sql": "SELECT * FROM dev.dog WHERE weight_lbs IS NULL" +} +``` + +This results in the expected two records being returned.
+ +![](/img/v4.1/reference/dynamic\_schema\_6\_query\_table\_with\_sql.png.webp) diff --git a/site/versioned_docs/version-4.1/reference/headers.md b/site/versioned_docs/version-4.1/reference/headers.md new file mode 100644 index 00000000..330425bf --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/headers.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Headers +--- + +# HarperDB Headers + +All HarperDB API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all HarperDB API responses: + +| Key | Example Value | Description | +|-------------------|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------| +| server-timing | db;dur=7.165 | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools. | +| hdb-response-time | 7.165 | This is the legacy header for reporting response time. It is deprecated and will be removed in 4.2. | +| content-type | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header. | diff --git a/site/versioned_docs/version-4.1/reference/index.md b/site/versioned_docs/version-4.1/reference/index.md new file mode 100644 index 00000000..70a3e37e --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/index.md @@ -0,0 +1,14 @@ +--- +title: Reference +--- + +# Reference + +This section contains technical details and reference materials for HarperDB. + +* [Storage Algorithm](./storage-algorithm) +* [Dynamic Schema](./dynamic-schema) +* [Headers](./headers) +* [Limitations](./limits) +* [Content Types](./content-types) +* [Data Types](./data-types) diff --git a/site/versioned_docs/version-4.1/reference/limits.md b/site/versioned_docs/version-4.1/reference/limits.md new file mode 100644 index 00000000..f6509b7b --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/limits.md @@ -0,0 +1,33 @@ +--- +title: HarperDB Limits +--- + +# HarperDB Limits + +This document outlines limitations of HarperDB. + +## Schema Naming Restrictions + +**Case Sensitivity** + +HarperDB schema metadata (schema names, table names, and attribute/column names) are case sensitive, meaning schemas, tables, and attributes can differ only by the case of their characters. + +**Restrictions on Schema Metadata Names** + +HarperDB schema metadata (schema names, table names, and attribute names) cannot contain the following UTF-8 characters: + +``` +/`¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ +``` + +Additionally, they cannot contain the first 31 non-printing characters. Spaces are allowed, but not recommended as best practice. The regular expression used to verify a name is valid is (a quick way to test names against it is shown at the end of this page): + +``` +^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$ +``` + +## Table Limitations + +**Attribute Maximum** + +HarperDB limits the number of attributes to 10,000 per table.
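As a quick illustration of the naming rule above, a candidate name can be checked against the published regular expression; a minimal sketch, assuming Node.js is available on the machine:

```bash
# Test example names against the documented naming pattern.
# "my table" passes (spaces are allowed); "naïve_table" fails (ï is outside the allowed ranges).
node -e '
const valid = /^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$/;
for (const name of ["my_table", "my table", "naïve_table"]) {
  console.log(name, valid.test(name));
}'
```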
\ No newline at end of file diff --git a/site/versioned_docs/version-4.1/reference/storage-algorithm.md b/site/versioned_docs/version-4.1/reference/storage-algorithm.md new file mode 100644 index 00000000..efd26d14 --- /dev/null +++ b/site/versioned_docs/version-4.1/reference/storage-algorithm.md @@ -0,0 +1,22 @@ +--- +title: Storage Algorithm +--- + +# Storage Algorithm +The HarperDB storage algorithm is fundamental to the HarperDB core functionality, enabling the [Dynamic Schema](./dynamic-schema) and all other user-facing functionality. HarperDB is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document will provide additional details on how data is stored within HarperDB. + +## Query Language Agnostic +The HarperDB storage algorithm was designed to abstract the data storage from any individual query language. HarperDB currently supports both SQL and NoSQL on top of this storage algorithm, with the ability to add additional query languages in the future. This means data can be inserted via NoSQL and read via SQL while hitting the same underlying data storage. + +## ACID Compliant +Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, HarperDB offers ACID compliance independently on each node. Readers and writers operate independently of each other, meaning readers don’t block writers and writers don’t block readers. Each HarperDB table has a single writer process, avoiding deadlocks and assuring that writes are executed in the order in which they were received. HarperDB tables can have multiple reader processes operating at the same time for consistent, high-scale reads. + +## Universally Indexed +All top-level attributes are automatically indexed immediately upon ingestion. The [HarperDB Dynamic Schema](./dynamic-schema) reflexively creates both the attribute and its index as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below. + +## Additional LMDB Benefits +HarperDB inherits both functional and performance benefits by implementing LMDB as the underlying key-value store. Data is memory-mapped, which enables quick data access without data duplication. All writers are fully serialized, making writes deadlock-free. LMDB is built to maximize operating system features and functionality, fully exploiting buffer cache and built to run in CPU cache. To learn more about LMDB, visit their documentation. + +## HarperDB Indexing Example (Single Table) + +![](/img/v4.1/reference/HarperDB-3.0-Storage-Algorithm.png.webp) \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/End-of-Life.md b/site/versioned_docs/version-4.1/release-notes/End-of-Life.md new file mode 100644 index 00000000..ca15f713 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/End-of-Life.md @@ -0,0 +1,14 @@ +--- +title: HarperDB Software Lifecycle Schedules +--- + +# HarperDB Software Lifecycle Schedules + +The lifecycle schedules below form a part of HarperDB’s Support Policies.
They include Major Releases and Minor Releases that have reached their end-of-life date in the past 3 years. + +| **Release** | **Release Date** | **End of Life Date** | |-------------|------------------|----------------------| | 3.2 | 6/22 | 6/25 | | 3.3 | 9/22 | 9/25 | | 4.0 | 1/23 | 1/26 | | 4.1 | 4/23 | 4/26 | diff --git a/site/versioned_docs/version-4.1/release-notes/index.md b/site/versioned_docs/version-4.1/release-notes/index.md new file mode 100644 index 00000000..8c0a3fb9 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/index.md @@ -0,0 +1,80 @@ +--- +title: Release Notes +--- + +# Release Notes + +### Current Release + +[Meet Tucker](./v4-tucker/) Our 4th Release Pup + +[4.1.2 Tucker](./v4-tucker/4.1.2) + +[4.1.1 Tucker](./v4-tucker/4.1.1) + +[4.1.0 Tucker](./v4-tucker/4.1.0) + +[4.0.6 Tucker](./v4-tucker/4.0.6) + +[4.0.5 Tucker](./v4-tucker/4.0.5) + +[4.0.4 Tucker](./v4-tucker/4.0.4) + +[4.0.3 Tucker](./v4-tucker/4.0.3) + +[4.0.2 Tucker](./v4-tucker/4.0.2) + +[4.0.1 Tucker](./v4-tucker/4.0.1) + +[4.0.0 Tucker](./v4-tucker/4.0.0) + + +### Past Releases + +[Meet Monkey](./v3-monkey/) Our 3rd Release Pup + +[3.2.1 Monkey](./v3-monkey/3.2.1) + +[3.2.0 Monkey](./v3-monkey/3.2.0) + +[3.1.5 Monkey](./v3-monkey/3.1.5) + +[3.1.4 Monkey](./v3-monkey/3.1.4) + +[3.1.3 Monkey](./v3-monkey/3.1.3) + +[3.1.2 Monkey](./v3-monkey/3.1.2) + +[3.1.1 Monkey](./v3-monkey/3.1.1) + +[3.1.0 Monkey](./v3-monkey/3.1.0) + +[3.0.0 Monkey](./v3-monkey/3.0.0) + +*** + +[Meet Penny](./v2-penny/) Our 2nd Release Pup + +[2.3.1 Penny](./v2-penny/2.3.1) + +[2.3.0 Penny](./v2-penny/2.3.0) + +[2.2.3 Penny](./v2-penny/2.2.3) + +[2.2.2 Penny](./v2-penny/2.2.2) + +[2.2.0 Penny](./v2-penny/2.2.0) + +[2.1.1 Penny](./v2-penny/2.1.1) + +*** + +[Meet Alby](./v1-alby/) Our 1st Release Pup + +[1.3.1 Alby](./v1-alby/1.3.1) + +[1.3.0 Alby](./v1-alby/1.3.0) + +[1.2.0 Alby](./v1-alby/1.2.0) + +[1.1.0 Alby](./v1-alby/1.1.0) diff --git a/site/versioned_docs/version-4.1/release-notes/v1-alby/1.1.0.md b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.1.0.md new file mode 100644 index 00000000..b42514a2 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.1.0.md @@ -0,0 +1,77 @@ +--- +title: 1.1.0 +sidebar_position: 89899 +--- + +### HarperDB 1.1.0, Alby Release +4/18/2018 + +**Features** + +* Users & Roles: + + * Limit/Assign access to all HarperDB operations + + * Limit/Assign access to schemas, tables & attributes + + * Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`) + +* Enhanced SQL parser + + * Added extensive ANSI SQL Support. + + * Added Array function, which allows for converting relational data into Object/Hierarchical data + + * `Distinct_Array` Function: allows for removing duplicates in the Array function. + + * Enhanced SQL Validation: Improved validation around structure of SQL, validating the schema, etc. + + * 10x performance improvement on SQL statements. + +* Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON. + +* Added upgrade function to CLI + +* Added ability to perform bulk update from CSV + +* Created landing page for HarperDB. + +* Added CORS support to HarperDB + +**Fixes** + +* Fixed memory leak in CSV bulk loads + +* Corrected error when attempting to perform a `SQL DELETE` + +* Added further validation to NoSQL `UPDATE` to validate schema & table exist
+ +* Fixed issues with replicated data when one of the replicas is down + +* Removed logging of initial user’s credentials during install + +* Can now use reserved words as aliases in SQL + +* Removed user(s) password in results when calling `list_users` + +* Corrected forwarding of operations to other nodes in a cluster + +* Corrected lag in schema meta-data passing to other nodes in a cluster + +* Drop table & schema now move the table & schema or table to the trash folder under the Database folder for later permanent deletion. + +* Bulk inserts no longer halt the entire operation if n records already exist, instead the return includes the hashes of records that have been skipped. + +* Added ability to accept EULA from command line + +* Corrected `search_by_value` not searching on the correct attribute + +* Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js + +* Add error handling resulting from SQL calculations. + +* Standardized error responses as JSON. + +* Corrected internal process generation to not allow more processes than machine has cores. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v1-alby/1.2.0.md b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.2.0.md new file mode 100644 index 00000000..095bf239 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.2.0.md @@ -0,0 +1,42 @@ +--- +title: 1.2.0 +sidebar_position: 89799 +--- + +### HarperDB 1.2.0, Alby Release +7/10/2018 + +**Features** + +* Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time. +* Geo: HarperDB has implemented turf.js into its SQL parser to enable geo based analytics. +* Jobs: CSV Data loads, Exports & Time to Live now all run as back ground jobs. +* Exports: Perform queries that export into JSON or CSV and save to disk or S3. + + +**Fixes** + +* Fixed issue where CSV data loads incorrectly report number of records loaded. +* Added validation to stop `BETWEEN` operations in SQL. +* Updated logging to not include internal variables in the logs. +* Cleaned up `add_role` response to not include internal variables. +* Removed old and unused dependencies. +* Build out further unit tests and integration tests. +* Fixed https to handle certificates properly. +* Improved stability of clustering & replication. +* Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response. +* Fixed issue where Blob text was not being returned from `SQL SELECT`s. +* Fixed error being returned when querying on table with no data, now correctly returns empty array. +* Improved performance in SQL when searching on exact values. +* Fixed error when ./harperdb stop is called. +* Fixed logging issue causing instability in installer. +* Fixed `read_log` operation to accept date time. +* Added permissions checking to `export_to_s3`. +* Added ability to run SQL on `SELECT` without a `FROM`. +* Fixed issue where updating a user’s password was not encrypting properly. +* Fixed `user_guide.html` to point to readme on git repo. +* Created option to have HarperDB run as a foreground process. +* Updated `user_info` to return the correct role for a user. +* Fixed issue where HarperDB would not stop if the database root was deleted. +* Corrected error message on insert if an invalid schema is provided. +* Added permissions checks for user & role operations. 
\ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v1-alby/1.3.0.md b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.3.0.md new file mode 100644 index 00000000..ad196159 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.3.0.md @@ -0,0 +1,27 @@ +--- +title: 1.3.0 +sidebar_position: 89699 +--- + +### HarperDB 1.3.0, Alby Release +11/2/2018 + +**Features** + +* Upgrade: Upgrade to newest version via command line. +* SQL Support: Added `IS NULL` for SQL parser. +* Added attribute validation to search operations. + + +**Fixes** + +* Fixed `SELECT` calculations, i.e. `SELECT` 2+2. +* Fixed `SELECT` with `OR` not returning expected results. +* No longer allowing reserved words for schema and table names. +* Corrected process interruptions from improper SQL statements. +* Improved message handling between spawned processes that replace killed processes. +* Enhanced error handling for updates to tables that do not exist. +* Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes. +* Fixed issue with new columns not being updated properly in update statements. +* Now validating roles, tables and attributes when creating or updating roles. +* Fixed an issue where in some cases `undefined` was being returned after dropping a role. diff --git a/site/versioned_docs/version-4.1/release-notes/v1-alby/1.3.1.md b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.3.1.md new file mode 100644 index 00000000..77e3ffe4 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v1-alby/1.3.1.md @@ -0,0 +1,29 @@ +--- +title: 1.3.1 +sidebar_position: 89698 +--- + +### HarperDB 1.3.1, Alby Release +2/26/2019 + +**Features** + +* Clustering connection direction appointment +* Foundations for threading/multiprocessing +* UUID autogen for hash attributes that were not provided +* Added cluster status operation + + +**Bug Fixes and Enhancements** + +* More logging +* Clustering communication enhancements +* Clustering queue ordering by timestamps +* Cluster reconnection enhancements +* Number of system core(s) detection +* Node LTS (10.15) compatibility +* Update/Alter users enhancements +* General performance enhancements +* Warning is logged if different versions of HarperDB are connected via clustering +* Fixed need to restart after user creation/alteration +* Fixed SQL error that occurred on selecting from an empty table \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v1-alby/index.md b/site/versioned_docs/version-4.1/release-notes/v1-alby/index.md new file mode 100644 index 00000000..265fe04d --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v1-alby/index.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Alby (Version 1) +--- + +# HarperDB Alby (Version 1) + +Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup. + +Here is a bit about Alby: + +![picture of black dog](/img/v4.1/dogs/alby.webp) + +_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sunbathing, going for swims, and wreaking havoc on the local squirrels.
My favorite snack is whatever you are eating, and I love a good butt scratch!_ diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/2.1.1.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.1.1.md new file mode 100644 index 00000000..e1314a5f --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.1.1.md @@ -0,0 +1,27 @@ +--- +title: 2.1.1 +sidebar_position: 79898 +--- + +### HarperDB 2.1.1, Penny Release +05/22/2020 + +**Highlights** + +* CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values. +* CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to aliases in `ORDER BY` statement. +* CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values. +* CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas. +* CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function. +* CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. Same rules apply now for hash attribute values. +* CORE-1003 Fixed handling of ORDER BY statements with function aliases. +* CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name. +* CORE-996 Fixed an issue where the `transact_to_cluster` flag is lost for CSV URL loads, fixed an issue where new attributes created in CSV bulk load do not sync to the cluster. +* CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, cpu, disk, network. +* CORE-993 Added new custom date functions for AlaSQL & UTC updates. +* CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process. +* CORE-992 HTTPS enabled by default. +* CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage. +* CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime. +* CORE-975 Updated time values set by SQL Date Functions to be in epoch format. +* CORE-974 Added date functions to `SQL SELECT` column alias functionality. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.0.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.0.md new file mode 100644 index 00000000..267168cd --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.0.md @@ -0,0 +1,43 @@ +--- +title: 2.2.0 +sidebar_position: 79799 +--- + +### HarperDB 2.2.0, Penny Release +08/24/2020 + +**Features/Updates** + +* CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up fewer resources +* CORE-1018 Adds SQL functionality for `BETWEEN` statements +* CORE-1032 Updates permissions to allow regular users (i.e.
non-super users) to call the `get_job` operation +* CORE-1036 On create/drop table we auto create/drop the related transaction environments for the schema.table +* CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations +* CORE-1057 Implemented write transaction into lmdb create/update/delete functions +* CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards +* CORE-1059 Added config setting to disable transaction logging for an instance +* CORE-1076 Adds permissions filter to describe operations +* CORE-1043 Change clustering catchup to use the new transaction log +* CORE-1052 Removed word "master" from source +* CORE-1061 Added new operation called `delete_transactions_before`; this will tail a transaction log for a specific schema/table +* CORE-1040 On HarperDB startup make sure all tables have a transaction environment +* CORE-1055 Added 2 new settings to change the server headersTimeout & keepAliveTimeout from the config file +* CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value` +* CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count +* CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL. +* CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions` +* CORE-1092 Do not allow the `__createdtime__` to be updated +* CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate +* CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found +* CORE-1049 Updates response message for SQL update op with no matching rows +* CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user. + +**Bug Fixes** + +* CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query +* CORE-1037 Fixed an issue where numbers with a leading zero (i.e. 00123) were converted to numbers rather than being honored as strings. +* CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo +* CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped. +* CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records. +* CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.2.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.2.md new file mode 100644 index 00000000..827c63db --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.2.md @@ -0,0 +1,16 @@ +--- +title: 2.2.2 +sidebar_position: 79797 +--- + +### HarperDB 2.2.2, Penny Release +10/27/2020 + +* CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
+* CORE-1153 Fixed issue where `delete_files_before` was writing to transaction log. +* CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created. +* CORE-1112 Adds handling for system timestamp attributes in permissions. +* CORE-1131 Adds better handling for checking perms on operations with action value in JSON. +* CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions. +* CORE-1135 Adds validation for valid keys in role API operations. +* CORE-1073 Adds new `import_from_s3` operation to API. diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.3.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.3.md new file mode 100644 index 00000000..eca953e2 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.2.3.md @@ -0,0 +1,9 @@ +--- +title: 2.2.3 +sidebar_position: 79796 +--- + +### HarperDB 2.2.3, Penny Release +11/16/2020 + +* CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause in between. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/2.3.0.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.3.0.md new file mode 100644 index 00000000..2b248490 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.3.0.md @@ -0,0 +1,22 @@ +--- +title: 2.3.0 +sidebar_position: 79699 +--- + +### HarperDB 2.3.0, Penny Release +12/03/2020 + +**Features/Updates** + +* CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information) +* CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation +* CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings. +* CORE-1175 Added new internal LMDB function to copy an environment for use in future features. +* CORE-1166 Updated packages to address security vulnerabilities. + +**Bug Fixes** + +* CORE-1195 Modified `drop_attribute` to drop after data cleanse completes. +* CORE-1149 Fix SQL bug regarding self-joins and updates AlaSQL to the 0.6.5 release. +* CORE-1168 Fix inconsistent invalid schema/table errors. +* CORE-1162 Fix bug which caused `delete_files_before` to cause tables to grow in size due to an open cursor issue. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/2.3.1.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.3.1.md new file mode 100644 index 00000000..51291a01 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/2.3.1.md @@ -0,0 +1,12 @@ +--- +title: 2.3.1 +sidebar_position: 79698 +--- + +### HarperDB 2.3.1, Penny Release +1/29/2021 + +**Bug Fixes** + +* CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation.
In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`. +* CORE-1219 Increased maximum table attributes from 1000 to 10000 \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v2-penny/index.md b/site/versioned_docs/version-4.1/release-notes/v2-penny/index.md new file mode 100644 index 00000000..5ab6c2a5 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v2-penny/index.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Penny (Version 2) +--- + +# HarperDB Penny (Version 2) + +Did you know our release names are dedicated to employee pups? For our second release, Penny was the star. + +Here is a bit about Penny: + +![picture of brindle dog](/img/v4.1/dogs/penny.webp) + +_Hi I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._ diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.0.0.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.0.0.md new file mode 100644 index 00000000..2907ee6c --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.0.0.md @@ -0,0 +1,31 @@ +--- +title: 3.0.0 +sidebar_position: 69999 +--- + +### HarperDB 3.0, Monkey Release +5/18/2021 + +**Features/Updates** + +* CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation. +* CORE-1304 Upgrade to Node 12.22.1. +* CORE-1235 Adds new upgrade/install functionality. +* CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance. +* CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`. +* CORE-1243 Change `HTTPS_ON` settings value to false by default. +* CORE-1189 Implement fastify web server, resulting in improved performance. +* CORE-1221 Update user API to use role name instead of role id. +* CORE-1225 Updated dependencies to eliminate npm security warnings. +* CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality. + +**Bug Fixes** + +* CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB. +* CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log. +* CORE-1285 Update warning log when trying to create an attribute that already exists. +* CORE-1254 Added logic to manage data collisions in clustering. +* CORE-1212 Add pre-check to `drop_user` that returns error if user doesn't exist. +* CORE-1114 Update response code and message from `add_user` when user already exists. +* CORE-1111 Update response from `create_attribute` to match the create schema/table response. +* CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update. 
\ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.0.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.0.md new file mode 100644 index 00000000..148690f6 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.0.md @@ -0,0 +1,23 @@ +--- +title: 3.1.0 +sidebar_position: 69899 +--- + +### HarperDB 3.1.0, Monkey Release +8/24/2021 + +**Features/Updates** + +* CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions +* CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions +* CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management +* CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments +* CORE-1310 Updated licensing functionality +* CORE-1301 Updated validation for performance improvement +* CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds +* CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES` + +**Bug Fixes** + +* CORE-1315 Corrected issue in HarperDB restart scenario +* CORE-1370 Update some of the validation error handlers so that they don't log the full stack trace \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.1.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.1.md new file mode 100644 index 00000000..0adbeb21 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.1.md @@ -0,0 +1,18 @@ +--- +title: 3.1.1 +sidebar_position: 69898 +--- + +### HarperDB 3.1.1, Monkey Release +9/23/2021 + +**Features/Updates** + +* CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart +* CORE-1395 Create a setting which will allow the local Studio to be served from an instance of HarperDB +* CORE-1397 Update the stock 404 response to not return the request URL +* General updates to optimize Docker container + +**Bug Fixes** + +* CORE-1399 Added fixes for complex SQL alias issues \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.2.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.2.md new file mode 100644 index 00000000..f1c192b6 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.2.md @@ -0,0 +1,15 @@ +--- +title: 3.1.2 +sidebar_position: 69897 +--- + +### HarperDB 3.1.2, Monkey Release +10/21/2021 + +**Features/Updates** + +* Updated the installation ASCII art to reflect the new HarperDB logo + +**Bug Fixes** + +* CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number, causing tables to behave unexpectedly \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.3.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.3.md new file mode 100644 index 00000000..2d484f8d --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.3.md @@ -0,0 +1,11 @@ +--- +title: 3.1.3
+sidebar_position: 69896 +--- + +### HarperDB 3.1.3, Monkey Release +1/14/2022 + +**Bug Fixes** + +* CORE-1446 Fix for scans on indexes larger than 1 million entries, causing queries to never return \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.4.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.4.md new file mode 100644 index 00000000..ae0074fd --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.4.md @@ -0,0 +1,11 @@ +--- +title: 3.1.4 +sidebar_position: 69895 +--- + +### HarperDB 3.1.4, Monkey Release +2/24/2022 + +**Features/Updates** + +* CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.5.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.5.md new file mode 100644 index 00000000..eff4b5b0 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.1.5.md @@ -0,0 +1,11 @@ +--- +title: 3.1.5 +sidebar_position: 69894 +--- + +### HarperDB 3.1.5, Monkey Release +3/4/2022 + +**Features/Updates** + +* CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which tried to convert them to numbers but instead returned NaN. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.2.0.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.2.0.md new file mode 100644 index 00000000..003575d8 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.2.0.md @@ -0,0 +1,13 @@ +--- +title: 3.2.0 +sidebar_position: 69799 +--- + +### HarperDB 3.2.0, Monkey Release +3/25/2022 + +**Features/Updates** + +* CORE-1391 Bug fix related to orphaned HarperDB background processes. +* CORE-1509 Updated node version check, updated Node.js version, updated project dependencies. +* CORE-1518 Remove final call from logger. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.2.1.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.2.1.md new file mode 100644 index 00000000..dc511a70 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.2.1.md @@ -0,0 +1,11 @@ +--- +title: 3.2.1 +sidebar_position: 69798 +--- + +### HarperDB 3.2.1, Monkey Release +6/1/2022 + +**Features/Updates** + +* CORE-1573 Added logic to track the pid of the foreground process if running in foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.3.0.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.3.0.md new file mode 100644 index 00000000..3e3ca784 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/3.3.0.md @@ -0,0 +1,12 @@ +--- +title: 3.3.0 +sidebar_position: 69699 +--- + +### HarperDB 3.3.0 - Monkey + +* CORE-1595 Added new role type `structure_user`, which enables non-superusers to create/drop schema/table/attribute. +* CORE-1501 Improved performance for `drop_table`. +* CORE-1599 Added two new operations for custom functions: `install_node_modules` & `audit_node_modules`. +* CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation.
This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`. +* CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, and Python are no longer required when installing on these platforms. diff --git a/site/versioned_docs/version-4.1/release-notes/v3-monkey/index.md b/site/versioned_docs/version-4.1/release-notes/v3-monkey/index.md new file mode 100644 index 00000000..84d3ac9e --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v3-monkey/index.md @@ -0,0 +1,11 @@ +--- +title: HarperDB Monkey (Version 3) +--- + +# HarperDB Monkey (Version 3) + +Did you know our release names are dedicated to employee pups? For our third release, we have Monkey. + +![picture of tan dog](/img/v4.1/dogs/monkey.webp) + +_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to her home. I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_ diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.0.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.0.md new file mode 100644 index 00000000..2907ee6c --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.0.md @@ -0,0 +1,31 @@ +--- +title: 4.0.0 +sidebar_position: 59999 +--- + +### HarperDB 4.0.0, Tucker Release +11/2/2022 + +**Networking & Data Replication (Clustering)** + +The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise-grade connective technology responsible for addressing, discovery, and exchange of messages that drive the common patterns in distributed systems. +* CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them. +* CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` code as a dependency; on install of HarperDB, download `nats-server` if possible, else fall back to building from source code. +* CORE-1593, CORE-1761: Add `nats.js` as project dependency. +* CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration. +* CORE-1467, CORE-1508: Launch and manage NATS servers with PM2. +* CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions. +* CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs. +* CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS. +* CORE-1504: Update existing hooks to save transactions to work with NATS. +* CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now manage dynamic sourcing of table-level transaction streams between nodes and work queues. +* CORE-1522: Create `NATSReplyService` process which handles receiving NATS-based requests from remote instances and sending back appropriate responses.
+* CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation. +* CORE-1611: Update pre-existing transaction log operations to be audit log operations. +* CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams. +* CORE-1668: Update NATS serialization/deserialization to use MessagePack. +* CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`. +* CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & just clustering restart call the NATS server with a reload directive to maintain full uptime while servers refresh. +* CORE-1474: HarperDB install adds clustering folder structure. +* CORE-1530: Post `drop_table` HarperDB purges the related transaction stream. +* CORE-1567: Set NATS config to always use TLS. +* CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate. +* CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`; this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB. +* CORE-1535: Create work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order. +* CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`. +* CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery. +* CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not. +* CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time to start synchronizing transactions. +* CORE-1785: Optimizations and bug fixes in regard to sourcing data from remote instances on HarperDB. +* CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together. +* CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh. +* CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh. +* CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to new hub server port. +* CORE-1609: Allow `remove_node` to be called when the other node cannot be reached. +* CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid concurrent NATS source update bug. +* CORE-1848: Update stream configs if the node name has been changed in the YAML configuration. +* CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on both the local and remote node respectively + + +**Data Storage** + +We have made improvements to how we store, index, and retrieve data. +* CORE-1619: Enabled new concurrent flushing technology for improved write performance. +* CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions. +* CORE-1652: Encode the values of secondary indices more efficiently for faster access. +* CORE-1670: Store updated timestamp in `lmdb.js`' version property.
+* CORE-1651: Enabled multiple-value indexing of array values, which allows searching on specific elements in an array more efficiently. +* CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance. +* Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput. +* CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values. +* CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with `get_attributes`) without incurring the cost of decoding the entire object. +* CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data. +* CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates. +* CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements. +* CORE-1753: Removed old `node-lmdb` dependency. +* CORE-1787: Freeze objects returned from queries. +* CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync. + +**Logging** + +HarperDB has increased logging specificity by breaking out logs based on the component doing the logging. There are dedicated log files for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more. +* CORE-1497: Remove `pino` and `winston` dependencies. +* CORE-1426: All logging is output via `stdout` and `stderr`; our default logging is then picked up by PM2 which handles writing out to file. +* CORE-1431: Improved `read_log` operation validation. +* CORE-1433, CORE-1463: Added log rotation. +* CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration. +* CORE-1436: Upgrade to 4.0 updates internals for logging changes. +* CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements, and improved unit tests. +* CORE-1691: Convert non-PM2 managed log file writes to use Node.js `fs.appendFileSync` function. + +**Configuration** + +HarperDB has updated its configuration from a properties file to YAML. +* CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML. +* CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file. +* Installer has updated prompts to reflect YAML settings. +* CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`. +* CORE-1461, CORE-1462, CORE-1483: Unit test improvements. +* CORE-1492: Improvements to `get_configuration` and `set_configuration` operations. +* CORE-1503: Modify HarperDB configuration for more granular certificate definition. +* CORE-1591: Update `routes` IP param to `host` and to `leaf` config in `harperdb.conf` +* CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on npm install. + +**Broad NodeJS and Platform Support** +* CORE-1624: HarperDB can now run on multiple versions of NodeJS, from v14 to v19. We primarily test on v18, so that is the preferred version. + +**Windows 10 and 11** +* CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL.
Windows is only intended for evaluation and development purposes, not for production workloads. + +**Extra Changes and Bug Fixes** +* CORE-1520: Refactor installer to remove all waterfall code and update to use Promises. +* CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb. +* CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned. +* CORE-1626: Update docker file to accommodate new `harperdb.conf` file. +* CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for CI/CD pipeline and integration tests. +* CORE-1661: Fixed issue where old boot properties file caused an error when attempting to install 4.0.0. +* CORE-1697, CORE-1814, CORE-1855: Upgrade fastify dependency to new major version 4. +* CORE-1629: Jobs are now running as processes managed by the PM2 daemon. +* CORE-1733: Update LICENSE to reflect our EULA on our site. +* CORE-1606: Enable Custom Functions by default. +* CORE-1714: Include pre-built binaries for most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64). +* CORE-1628: Fix issue where setting license through environment variable was not working. +* CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements. +* CORE-1706: Add support for encoding HTTP responses with MessagePack. +* CORE-1709: Improve the way lmdb.js dependencies are installed. +* CORE-1758: Remove/update unnecessary HTTP headers. +* CORE-1756: On `npm install` and `harperdb install` change the node version check from an error to a warning if the installed Node.js version does not match our preferred version. +* CORE-1791: Optimizations to authenticated user caching. +* CORE-1794: Update README to discuss Windows support & Node.js versions +* CORE-1837: Fix issue where Custom Function directory was not being created on install. +* CORE-1742: Add more validation to audit log: check that the schema/table exists and the log is enabled. +* CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`. +* CORE-1864: Fix to semver checks on upgrade. +* CORE-1850: Fix issue where a `cluster_user` type role could not be altered. diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.1.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.1.md new file mode 100644 index 00000000..9e148e63 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.1.md @@ -0,0 +1,12 @@ +--- +title: 4.0.1 +sidebar_position: 59998 +--- + +### HarperDB 4.0.1, Tucker Release +01/20/2023 + +**Bug Fixes** + +* CORE-1992 Local Studio was not loading because the path got mangled in the build. +* CORE-2001 Fixed `deploy_custom_function_project`, which broke after the Node.js update.
diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.2.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.2.md new file mode 100644 index 00000000..b65d1427 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.2.md @@ -0,0 +1,12 @@ +--- +title: 4.0.2 +sidebar_position: 59997 +--- + +### HarperDB 4.0.2, Tucker Release +01/24/2023 + +**Bug Fixes** + +* CORE-2003 Fix bug where, if the machine had one core, the thread config would default to zero. +* Update to lmdb 2.7.3 and msgpackr 1.7.0 diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.3.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.3.md new file mode 100644 index 00000000..67aaae56 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.3.md @@ -0,0 +1,11 @@ +--- +title: 4.0.3 +sidebar_position: 59996 +--- + +### HarperDB 4.0.3, Tucker Release +01/26/2023 + +**Bug Fixes** + +* CORE-2007 Add update nodes 4.0.0 launch script to build script to fix clustering upgrade. diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.4.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.4.md new file mode 100644 index 00000000..2a30c9d1 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.4.md @@ -0,0 +1,11 @@ +--- +title: 4.0.4 +sidebar_position: 59995 +--- + +### HarperDB 4.0.4, Tucker Release +01/27/2023 + +**Bug Fixes** + +* CORE-2009 Fixed bug where add node was not being called when upgrading clustering. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.5.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.5.md new file mode 100644 index 00000000..dc66721f --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.5.md @@ -0,0 +1,14 @@ +--- +title: 4.0.5 +sidebar_position: 59994 +--- + +### HarperDB 4.0.5, Tucker Release +02/15/2023 + +**Bug Fixes** + +* CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to upgrade to determine if new certificates should be created or existing certificates should be kept/used. +* Fix the way NATS connections are honored in a local environment. +* Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. + diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.6.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.6.md new file mode 100644 index 00000000..bf97d148 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.6.md @@ -0,0 +1,11 @@ +--- +title: 4.0.6 +sidebar_position: 59993 +--- + +### HarperDB 4.0.6, Tucker Release +03/09/2023 + +**Bug Fixes** + +* Fixed a data serialization error that occurred when a large number of different record structures are persisted in a single table.
diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.7.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.7.md new file mode 100644 index 00000000..7d48666a --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.0.7.md @@ -0,0 +1,11 @@ +--- +title: 4.0.7 +sidebar_position: 59992 +--- + +### HarperDB 4.0.7, Tucker Release +03/10/2023 + +**Bug Fixes** + +* Update lmdb.js dependency \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.0.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.0.md new file mode 100644 index 00000000..539ed67d --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.0.md @@ -0,0 +1,61 @@ +--- +title: 4.1.0 +sidebar_position: 59899 +--- + +### HarperDB 4.1.0, Tucker Release + +HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation with support for optimized load tracking and session affinity, better debuggability, and reduced memory footprint. + +This means debugging will be much easier for custom functions. If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, set breakpoints in your custom functions, and debug them. + +The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread, which can improve caching locality, performance, and fairness. This can be enabled with the [`http.sessionAffinity` option in your configuration](../../security/configuration#session-affinity). + +HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory-efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent), and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and can provide a means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade; all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or for-of loops to handle query results). + +4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../configuration#storage) and [schemas configuration](../../configuration#schemas) for information on how to configure these locations. + +Logging has been revamped and condensed into one `hdb.log` file. See [logging](../../logging) for more information. + +A new operation called `cluster_network` was added; this operation pings the cluster and returns a list of enmeshed nodes.
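For reference, a minimal request sketch (the operation name comes from the note above; any optional attributes are omitted, since they are not documented in this passage):

```json
{
  "operation": "cluster_network"
}
```

As with other operations, this body is POSTed to the operations API endpoint with appropriate authentication.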
+ +Custom Functions will no longer automatically load static file routes; instead, the `@fastify/static` plugin will need to be registered with the Custom Function server. See [Host A Static Web UI](../../custom-functions/host-static). + +Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object, it should be done in the `key` parameter. See examples [here](https://api.harperdb.io/#aa74bbdf-668c-4536-80f1-b91bb13e5024). + +Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3`: +* A new attribute `region` will need to be supplied +* The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`. + +Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard Unix tooling for interacting with the process and is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`. + +Internal Tickets completed: +* CORE-609 - Ensure that attribute names are always added to global schema as Strings +* CORE-1549 - Remove fastify-static code from Custom Functions server which auto serves content from "static" folder +* CORE-1655 - Iterator based queries +* CORE-1764 - Fix issue where describe_all operation returns an empty object for non super-users if schema(s) do not yet have table(s) +* CORE-1854 - Switch to using worker threads instead of processes for handling concurrency +* CORE-1877 - Extend the csv_url_load operation to allow for additional headers to be passed to the remote server when the csv is being downloaded +* CORE-1893 - Add last updated timestamp to describe operations +* CORE-1896 - Fix issue where Select * from system.hdb_info returns wrong HDB version number after Instance Upgrade +* CORE-1904 - Fix issue when executing GEOJSON query in SQL +* CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams +* CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. +* CORE-1655 - Streaming binary format serialization +* CORE-1943 - Add configuration option to set mount point for audit tables +* CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. +* CORE-1963 - Update logging for better readability, reduced duplication, and request context information. +* CORE-1968 - In server\nats\natsIngestService.js remove the js_msg.working(); line to improve performance. +* CORE-1976 - Fix error when calling describe_table operation with no schema or table defined in payload. +* CORE-1983 - Fix issue where create_attribute operation does not validate request for required attributes +* CORE-2015 - Remove PM2 logs that get logged in console when starting HDB +* CORE-2048 - systemd script for 4.1 +* CORE-2052 - Include thread information in system_information for visibility of threads +* CORE-2061 - Add a better error msg when clustering is enabled without a cluster user set +* CORE-2068 - Create new log rotate logic since pm2 log-rotate no longer used +* CORE-2072 - Update to Node 18.15.0 +* CORE-2090 - Upgrade Testing from v4.0.x and v3.x to v4.1.
+* CORE-2091 - Run the performance tests +* CORE-2092 - Allow for automatic patch version updates of certain packages +* CORE-2109 - Add verify option to clustering TLS configuration +* CORE-2111 - Update AWS SDK to v3 diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.1.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.1.md new file mode 100644 index 00000000..0dce0bd7 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.1.md @@ -0,0 +1,16 @@ +--- +title: 4.1.1 +sidebar_position: 59898 +--- + +### HarperDB 4.1.1, Tucker Release +06/16/2023 + +* HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory-constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../configuration). +* An option has been added for [disabling the republishing of NATS messages](../../configuration), which can provide improved replication performance in a fully connected network. +* Improvements to our OpenShift container. +* Dependency security updates. + +**Bug Fixes** + +* Fixed a bug in reporting database metrics in the `system_information` operation. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.2.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.2.md new file mode 100644 index 00000000..2a62db64 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/4.1.2.md @@ -0,0 +1,13 @@ +--- +title: 4.1.2 +sidebar_position: 59897 +--- + +### HarperDB 4.1.2, Tucker Release +06/16/2023 + +* HarperDB has updated binary dependencies to support older glibc versions back to 2.17. +* A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`. +* Improvements to our OpenShift container. +* Dependency security updates. + diff --git a/site/versioned_docs/version-4.1/release-notes/v4-tucker/index.md b/site/versioned_docs/version-4.1/release-notes/v4-tucker/index.md new file mode 100644 index 00000000..0d8d3fd0 --- /dev/null +++ b/site/versioned_docs/version-4.1/release-notes/v4-tucker/index.md @@ -0,0 +1,11 @@ +--- +title: HarperDB Tucker (Version 4) +--- + +# HarperDB Tucker (Version 4) + +Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. + +![picture of grey and white dog](/img/v4.1/dogs/tucker.png) + +_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at HarperDB. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard. I have very ticklish feet and love belly rubs!_ diff --git a/site/versioned_docs/version-4.1/security/basic-auth.md b/site/versioned_docs/version-4.1/security/basic-auth.md new file mode 100644 index 00000000..f251f27a --- /dev/null +++ b/site/versioned_docs/version-4.1/security/basic-auth.md @@ -0,0 +1,70 @@ +--- +title: Authentication +--- + +# Authentication + +HarperDB uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests.
In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a user name and password when making a request. + + + +**_You do not need to log in separately. Basic Auth is added to each HTTP request (create_schema, create_table, insert, etc.) via headers._** + + + +A header is added to each HTTP request. The header key is **“Authorization”**; the header value is **“Basic <<your username and password buffer token>>”** + + + + + +## Authentication in HarperDB Studio + +In the code sample below, you can see where we add the authorization header to the request. This needs to be added for each and every HTTP request for HarperDB. + +_Note: This function uses btoa. Learn about [btoa here](https://developer.mozilla.org/en-US/docs/Web/API/btoa)._ + +```javascript +// Node's built-in http module (import added so the sample is self-contained) +const http = require('http'); + +// Minimal helper assumed by the sample below: returns true if the buffer parses as JSON +function isJson(buffer) { + try { + JSON.parse(buffer); + return true; + } catch (e) { + return false; + } +} + +function callHarperDB(call_object, operation, callback){ + + const options = { + "method": "POST", + "hostname": call_object.endpoint_url, + "port": call_object.endpoint_port, + "path": "/", + "headers": { + "content-type": "application/json", + // btoa Base64-encodes the username:password pair for Basic Auth + "authorization": "Basic " + btoa(call_object.username + ':' + call_object.password), + "cache-control": "no-cache" + + } + }; + + const http_req = http.request(options, function (hdb_res) { + let chunks = []; + + hdb_res.on("data", function (chunk) { + chunks.push(chunk); + }); + + hdb_res.on("end", function () { + const body = Buffer.concat(chunks); + if (isJson(body)) { + return callback(null, JSON.parse(body)); + } else { + return callback(body, null); + + } + + }); + }); + + http_req.on("error", function (chunk) { + return callback("Failed to connect", null); + }); + + // The operation payload is sent as the JSON request body + http_req.write(JSON.stringify(operation)); + http_req.end(); + +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/security/certificate-management.md b/site/versioned_docs/version-4.1/security/certificate-management.md new file mode 100644 index 00000000..2a840f78 --- /dev/null +++ b/site/versioned_docs/version-4.1/security/certificate-management.md @@ -0,0 +1,59 @@ +--- +title: Certificate Management +--- + +# Certificate Management + +This document covers managing certificates for the Operations API and the Custom Functions API. For information on certificate management for clustering see [clustering certificate management](../clustering/certificate-management). + +## Development + +An out-of-the-box install of HarperDB does not have HTTPS enabled for the Operations API or the Custom Functions API (see [configuration](../configuration) for relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS. + +To enable HTTPS, set the `operationsApi.network.https` and `customFunctions.network.https` to `true` and restart HarperDB. + +By default, HarperDB will generate certificates and place them at `/keys/`. These certificates will not have a valid Common Name (CN) for your HarperDB node, so you will be able to use HTTPS, but your HTTPS client must be configured to accept the invalid certificate. + +## Production + +For production deployments, in addition to using HTTPS, we recommend using your own certificate authority (CA) or a public CA such as Let's Encrypt to generate certificates with CNs that match the Fully Qualified Domain Name (FQDN) of your HarperDB node. + +We have a few recommended options for enabling HTTPS in a production setting.
+ +### Option: Enable HarperDB HTTPS and Replace Certificates + +To enable HTTPS, set the `operationsApi.network.https` and `customFunctions.network.https` to `true` and restart HarperDB. + +To replace the certificates, either replace the contents of the existing certificate files at `/keys/`, or update the HarperDB configuration with the path of your new certificate files, and then restart HarperDB. +```yaml +operationsApi: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` +```yaml +customFunctions: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +### Option: Nginx Reverse Proxy + +Instead of enabling HTTPS for HarperDB, Nginx can be used as a reverse proxy for HarperDB. + +Install Nginx, configure Nginx to use certificates issued from your own CA or a public CA, then configure Nginx to listen for HTTPS requests and forward to HarperDB as HTTP requests. + +[Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let’s Encrypt certificates used by Nginx. + +### Option: External Reverse Proxy + +Instead of enabling HTTPS for HarperDB, a number of different external services can be used as a reverse proxy for HarperDB. These services typically have integrated certificate management. Configure the service to listen for HTTPS requests and forward (over a private network) to HarperDB as HTTP requests. + +Examples of these types of services include an AWS Application Load Balancer or a GCP external HTTP(S) load balancer. + +### Additional Considerations + +It is possible to use different certificates for the Operations API and the Custom Functions API. In scenarios where only your Custom Functions endpoints need to be exposed to the Internet and the Operations API is reserved for HarperDB administration, you may want to use a private CA to issue certificates for the Operations API and a public CA for the Custom Functions API certificates. \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/security/configuration.md b/site/versioned_docs/version-4.1/security/configuration.md new file mode 100644 index 00000000..53fad411 --- /dev/null +++ b/site/versioned_docs/version-4.1/security/configuration.md @@ -0,0 +1,61 @@ +--- +title: Configuration +--- + +# Configuration + +HarperDB was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with HarperDB. + + + +## CORS + +HarperDB allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, HarperDB enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following: + +1) Open the harperdb-config.yaml file; this can be found in <ROOTPATH>, the location you specified during install. + +2) In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: cors and corsAccessList. + * `cors` + + 1) To turn off, change to: `cors: false` + + 2) To turn on, change to: `cors: true` + + * `corsAccessList` + + 1) The `corsAccessList` will only be recognized by the system when `cors` is `true` + + 2) To create an access list, you set `corsAccessList` to a comma-separated list of domains. + + i.e.
`corsAccessList` is `http://harperdb.io,http://products.harperdb.io` + + 3) To clear out the access list and allow all domains: `corsAccessList` is `[null]` + + +## SSL + +HarperDB provides the option to use an HTTP interface or an HTTPS and HTTP/2 interface. The default port for the server is 9925. + + + +This default port can be changed by updating the `operationsApi.network.port` value in `/harperdb-config.yaml`. + + + +By default, HTTPS is turned off and HTTP is turned on. It is recommended that you never directly expose HarperDB's HTTP interface through a publicly available port. HTTP is intended for local or private network use. + + + +You can toggle HTTPS and HTTP in the settings file by setting `operationsApi.network.https` to true/false. When `https` is set to `false`, the server will use HTTP (version 1.1). Enabling HTTPS will enable both HTTPS/1.1 and HTTPS/2. + + + +HarperDB automatically generates a certificate (certificate.pem), a certificate authority (ca.pem), and a private key file (privateKey.pem) which live at `/keys/`. + + + +You can replace these with your own certificates and key. + + + +**Changes to these settings require a restart. Use the `harperdb restart` operation from the HarperDB Operations API.** \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/security/index.md b/site/versioned_docs/version-4.1/security/index.md new file mode 100644 index 00000000..51f8ce0b --- /dev/null +++ b/site/versioned_docs/version-4.1/security/index.md @@ -0,0 +1,13 @@ +--- +title: Security +--- + +# Security + +HarperDB uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases. + +* [JWT Authentication](./jwt-auth) +* [Basic Authentication](./basic-auth) +* [Configuration](./configuration) +* [Users and Roles](./users-and-roles) + diff --git a/site/versioned_docs/version-4.1/security/jwt-auth.md b/site/versioned_docs/version-4.1/security/jwt-auth.md new file mode 100644 index 00000000..8447c5ff --- /dev/null +++ b/site/versioned_docs/version-4.1/security/jwt-auth.md @@ -0,0 +1,97 @@ +--- +title: JWT Authentication +--- + +# JWT Authentication +HarperDB uses token-based authentication with JSON Web Tokens (JWTs). + +This consists of two primary operations: `create_authentication_tokens` and `refresh_operation_token`. These generate two types of tokens, as follows: + +* The `operation_token`, which is used to authenticate all HarperDB operations in the Bearer Token Authorization Header. The default expiry is one day. + +* The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization Header for the `refresh_operation_token` operation only. The default expiry is thirty days. + +The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost. + +## Create Authentication Tokens + +Users must initially create tokens using their HarperDB credentials. The following POST body is sent to HarperDB. No headers are required for this POST operation.
+ +```json +{ + "operation": "create_authentication_tokens", + "username": "username", + "password": "password" +} +``` + +A full cURL example can be seen here: + +```bash +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "operation": "create_authentication_tokens", + "username": "username", + "password": "password" +}' +``` + +An example expected return object is: + +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60" +} +``` + +## Using JWT Authentication Tokens + +The `operation_token` value is used to authenticate all operations in place of our standard Basic auth.
In order to pass the token you will need to create a Bearer Token Authorization Header like the following request: + +```bash +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \ +--data-raw '{ + "operation":"search_by_hash", + "schema":"dev", + "table":"dog", + "hash_values":[1], + "get_attributes": ["*"] +}' +``` + +## Token Expiration + +`operation_token` expires at a set interval. Once it expires, it will no longer be accepted by HarperDB. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization Header. A full cURL example can be seen here: + +```bash +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \ +--data-raw '{ + "operation":"refresh_operation_token" +}' +``` + +This will return a new `operation_token`.
An example expected return object is: + +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ5NzgxODkxNTEsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDk3ODE4OTE1MSwiYWN0aXZlIjp0cnVlLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDk0NDE1MTM0NywiX191cGRhdGVkdGltZV9fIjoxNjA0OTQ0MTUxMzQ3LCJpZCI6IjdiNDNlNzM1LTkzYzctNDQzYi05NGY3LWQwMzY3Njg5NDc4YSIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6InVzZXJuYW1lIn0sImlhdCI6MTYwNDk3ODcxMywiZXhwIjoxNjA1MDY1MTEzLCJzdWIiOiJvcGVyYXRpb24ifQ.qB4FS7fzryCO5epQlFCQe4mQcUEhzXjfsXRFPgauXrGZwSeSr2o2a1tE1xjiI3qjK0r3f2bdi2xpFlDR1thdY-m0mOpHTICNOae4KdKzp7cyzRaOFurQnVYmkWjuV_Ww4PJgr6P3XDgXs5_B2d7ZVBR-BaAimYhVRIIShfpWk-4iN1XDk96TwloCkYx01BuN87o-VOvAnOG-K_EISA9RuEBpSkfUEuvHx8IU4VgfywdbhNMh6WXM0VP7ZzSpshgsS07MGjysGtZHNTVExEvFh14lyfjfqKjDoIJbo2msQwD2FvrTTb0iaQry1-Wwz9QJjVAUtid7tJuP8aBeNqvKyMIXRVnl5viFUr-Gs-Zl_WtyVvKlYWw0_rUn3ucmurK8tTy6iHyJ6XdUf4pYQebpEkIvi2rd__e_Z60V84MPvIYs6F_8CAy78aaYmUg5pihUEehIvGRj1RUZgdfaXElw90-m-M5hMOTI04LrzzVnBu7DcMYg4UC1W-WDrrj4zUq7y8_LczDA-yBC2-bkvWwLVtHLgV5yIEuIx2zAN74RQ4eCy1ffWDrVxYJBau4yiIyCc68dsatwHHH6bMK0uI9ib6Y9lsxCYjh-7MFcbP-4UBhgoDDXN9xoUToDLRqR9FTHqAHrGHp7BCdF5d6TQTVL5fmmg61MrLucOo-LZBXs1NY" +} +``` + +The `refresh_token` also expires at a set interval, but a longer interval. Once it expires, it will no longer be accepted by HarperDB. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../configuration). To generate a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called. + +## Configuration + +Token timeouts are configurable in [harperdb-config.yaml](../configuration) with the following parameters: + +* `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the `operation_token` expires (default 1d). + +* `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the `refresh_token` expires (default 30d). + +A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
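As an illustration, the two dotted parameter names above would map onto `harperdb-config.yaml` roughly like this (a sketch inferred from the parameter paths and defaults listed here; confirm against your own configuration file):

```yaml
operationsApi:
  authentication:
    operationTokenTimeout: 1d # length of time until the operation_token expires
    refreshTokenTimeout: 30d # length of time until the refresh_token expires
```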
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/security/users-and-roles.md b/site/versioned_docs/version-4.1/security/users-and-roles.md
new file mode 100644
index 00000000..1801fae2
--- /dev/null
+++ b/site/versioned_docs/version-4.1/security/users-and-roles.md
@@ -0,0 +1,288 @@
+---
+title: Users & Roles
+---
+
+# Users & Roles
+
+HarperDB utilizes a Role-Based Access Control (RBAC) framework to manage access to HarperDB instances. A user is assigned a role that determines the user’s permissions to access database resources and run core operations.
+
+## Roles in HarperDB
+
+Role permissions in HarperDB are broken into two categories – permissions around database manipulation and permissions around database definition.
+
+**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a HarperDB instance.
+
+1) At the table level, CRUD permissions must be explicitly defined when adding or altering a role – *i.e. HarperDB will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations.*
+
+2) At the attribute level, permissions for attributes in each table included in the permissions set are assigned from the attribute-level permissions defined in that table’s permission set or, if no attribute-level permissions are defined, from the table’s CRUD set.
+
+**Database Definition**: Permissions related to managing schemas, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role.
+
+**Built-In Roles**
+
+There are three built-in roles within HarperDB. See a full breakdown of the operations restricted to super_user roles [here](#role-based-operation-restrictions).
+
+* `super_user` - This role provides full access to all operations and methods within a HarperDB instance; it can be considered the admin role.
+
+    * This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
+
+* `cluster_user` - This role is an internally managed system role type that allows clustered instances to communicate with one another.
+
+* `structure_user` - This role provides specific access for the creation and deletion of data structures.
+
+    * When defining this role type, you can either assign a value of true, which allows the role to create and drop schemas & tables, or assign a string array of schema names, which allows the role to create and drop tables only in the designated schemas.
+
+**User-Defined Roles**
+
+In addition to built-in roles, admins (i.e. users assigned to the super_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.
+
+* Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
+
+* Describe operations will return metadata for all schemas, tables, and attributes that a user-defined role has CRUD permissions for.
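+Once a custom role exists, it is assigned when creating a user. As an illustrative sketch (the role name, username, and password below are hypothetical placeholders), a super_user could assign a role with the `add_user` operation:
+
+```json
+{
+    "operation": "add_user",
+    "role": "software_developer",
+    "username": "hdb_user",
+    "password": "password",
+    "active": true
+}
+```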
+
+
+**Role Permissions**
+
+When creating a new, user-defined role in a HarperDB instance, you must provide a role name and the permissions to assign to that role. *Reminder: only super users can create and manage roles.*
+
+* `role` - the name used to easily identify the role assigned to individual users.
+
+    *Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation.*
+
+* `permissions` - used to explicitly define CRUD access to existing table data.
+
+Example JSON for an `add_role` request:
+
+```json
+{
+    "operation":"add_role",
+    "role":"software_developer",
+    "permission":{
+        "super_user":false,
+        "schema_name":{
+            "tables": {
+                "table_name1": {
+                    "read":true,
+                    "insert":true,
+                    "update":true,
+                    "delete":false,
+                    "attribute_permissions":[
+                        {
+                            "attribute_name":"attribute1",
+                            "read":true,
+                            "insert":true,
+                            "update":true
+                        }
+                    ]
+                },
+                "table_name2": {
+                    "read":true,
+                    "insert":true,
+                    "update":true,
+                    "delete":false,
+                    "attribute_permissions":[]
+                }
+            }
+        }
+    }
+}
+```
+
+**Setting Role Permissions**
+
+There are two parts to a permissions set:
+
+* `super_user` – boolean value indicating if the role should be provided super_user access.
+
+    *If `super_user` is set to true, there should be no additional schema-specific permissions values included, since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within HarperDB but ignored, as super_users have full access to the database.*
+
+* `permissions` – schema tables that a role should have specific CRUD access to should be included in the final, schema-specific `permissions` JSON.
+
+    *For user-defined roles (i.e. non-super_user roles), blank permissions will result in the user being restricted from accessing any of the database schema.*
+
+**Table Permissions JSON**
+
+Each table that a role should be given some level of CRUD permissions to must be included in the `tables` array for its schema in the role’s permissions JSON passed to the API (*see example above*).
+
+```json
+{
+    "table_name": { // the name of the table to define CRUD perms for
+        "read": boolean, // access to read from this table
+        "insert": boolean, // access to insert data to table
+        "update": boolean, // access to update data in table
+        "delete": boolean, // access to delete row data in table
+        "attribute_permissions": [ // permissions for specific table attributes
+            {
+                "attribute_name": "attribute_name", // attribute to assign permissions to
+                "read": boolean, // access to read this attribute from table
+                "insert": boolean, // access to insert this attribute into the table
+                "update": boolean // access to update this attribute in the table
+            }
+        ]
+    }
+}
+```
+
+**Important Notes About Table Permissions**
+
+1) If a schema and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to that schema and/or those tables.
+
+2) If a table-level CRUD permission is set to false, any attribute-level permission with that same CRUD permission set to true will return an error.
+
+**Important Notes About Attribute Permissions**
+
+1) If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array.
+
+2) If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute`, even if it is not explicitly defined in the permissions JSON.
+
+    *See table_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute.*
+
+3) If attribute-level permissions are set – *i.e. attribute_permissions.length > 0* – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).
+
+    *See table_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert, and update ‘attribute1’ and the table’s hash attribute, but no other attributes on that table.*
+
+4) If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.
+
+    *See table_name2’s permission set for an example of this.*
+
+5) The `__createdtime__` and `__updatedtime__` attributes that HarperDB manages internally can have read permissions set, but, if set, all other attribute-level permissions on them will be ignored.
+
+6) Please note that DELETE permissions are not included as part of an individual attribute-level permission set. That is because it is not possible to delete individual attributes from a row; rows must be deleted in full.
+
+    * If a role needs the ability to delete rows from a table, that permission should be set at the table level.
+
+    * The practical approach to deleting an individual attribute of a row is to set that attribute to null via an update statement.
+
+## Role-Based Operation Restrictions
+
+The table below includes all API operations available in HarperDB and indicates whether or not the operation is restricted to super_user roles.
+
+*Keep in mind that non-super_user roles will also be restricted within the operations they do have access to by the schema-level CRUD permissions set for the roles.*
+
+| Schemas and Tables | Restricted to Super_Users |
+|--------------------|:-------------------------:|
+| describe_all | |
+| describe_schema | |
+| describe_table | |
+| create_schema | X |
+| drop_schema | X |
+| create_table | X |
+| drop_table | X |
+| create_attribute | |
+| drop_attribute | X |
+
+| NoSQL Operations | Restricted to Super_Users |
+|----------------------|:-------------------------:|
+| insert | |
+| update | |
+| upsert | |
+| delete | |
+| search_by_hash | |
+| search_by_value | |
+| search_by_conditions | |
+
+| SQL Operations | Restricted to Super_Users |
+|----------------|:-------------------------:|
+| select | |
+| insert | |
+| update | |
+| delete | |
+
+| Bulk Operations | Restricted to Super_Users |
+|-----------------|:-------------------------:|
+| csv_data_load | |
+| csv_file_load | |
+| csv_url_load | |
+| import_from_s3 | |
+
+| Users and Roles | Restricted to Super_Users |
+|-----------------|:-------------------------:|
+| list_roles | X |
+| add_role | X |
+| alter_role | X |
+| drop_role | X |
+| list_users | X |
+| user_info | |
+| add_user | X |
+| alter_user | X |
+| drop_user | X |
+
+| Clustering | Restricted to Super_Users |
+|-----------------------|:-------------------------:|
+| cluster_set_routes | X |
+| cluster_get_routes | X |
+| cluster_delete_routes | X |
+| add_node | X |
+| update_node | X |
+| cluster_status | X |
+| remove_node | X |
+| configure_cluster | X |
+
+| Custom Functions | Restricted to Super_Users |
+|---------------------------------|:-------------------------:|
+| custom_functions_status | X |
+| get_custom_functions | X |
+| get_custom_function | X |
+| set_custom_function | X |
+| drop_custom_function | X |
+| add_custom_function_project | X |
+| drop_custom_function_project | X |
+| package_custom_function_project | X |
+| deploy_custom_function_project | X |
+
+| Registration | Restricted to Super_Users |
+|-------------------|:-------------------------:|
+| registration_info | |
+| get_fingerprint | X |
+| set_license | X |
+
+| Jobs | Restricted to Super_Users |
+|---------------------------|:-------------------------:|
+| get_job | |
+| search_jobs_by_start_date | X |
+
+| Logs | Restricted to Super_Users |
+|--------------------------------|:-------------------------:|
+| read_log | X |
+| read_transaction_log | X |
+| delete_transaction_logs_before | X |
+| read_audit_log | X |
+| delete_audit_logs_before | X |
+
+| Utilities | Restricted to Super_Users |
+|-----------------------|:-------------------------:|
+| delete_records_before | X |
+| export_local | X |
+| export_to_s3 | X |
+| system_information | X |
+| restart | X |
+| restart_service | X |
+| get_configuration | X |
+| configure_cluster | X |
+
+| Token Authentication | Restricted to Super_Users |
+|------------------------------|:-------------------------:|
+| create_authentication_tokens | |
+| refresh_operation_token | |
+
+## Error: Must execute as User
+
+**You may have gotten an error like,** `Error: Must execute as <>`.
+
+This means that you installed HarperDB as `<>`. Because HarperDB stores files natively on the operating system, we only allow the HarperDB executable to be run by a single user. This prevents permissions issues on files.
+
+For example, if you installed as user_a but later wanted to run as user_b,
+user_b may not have access to the hdb files HarperDB needs. This also keeps HarperDB more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/date-functions.md b/site/versioned_docs/version-4.1/sql-guide/date-functions.md
new file mode 100644
index 00000000..f19d2126
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/date-functions.md
@@ -0,0 +1,222 @@
+---
+title: SQL Date Functions
+---
+
+# SQL Date Functions
+
+HarperDB utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC.
+
+When parsing date values passed to SQL date functions in HDB, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found.
+
+### CURRENT_DATE()
+
+Returns the current date in UTC in `YYYY-MM-DD` String format.
+
+```
+"SELECT CURRENT_DATE() AS current_date_result" returns
+    {
+      "current_date_result": "2020-04-22"
+    }
+```
+
+### CURRENT_TIME()
+
+Returns the current time in UTC in `HH:mm:ss.SSS` String format.
+
+```
+"SELECT CURRENT_TIME() AS current_time_result" returns
+    {
+      "current_time_result": "15:18:14.639"
+    }
+```
+
+### CURRENT_TIMESTAMP
+
+Referencing this variable will evaluate as the current Unix Timestamp in milliseconds.
+
+```
+"SELECT CURRENT_TIMESTAMP AS current_timestamp_result" returns
+    {
+      "current_timestamp_result": 1587568845765
+    }
+```
+
+### DATE([date_string])
+
+Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above.
+
+```
+"SELECT DATE(1587568845765) AS date_result" returns
+    {
+      "date_result": "2020-04-22T15:20:45.765+0000"
+    }
+```
+
+```
+"SELECT DATE(CURRENT_TIMESTAMP) AS date_result2" returns
+    {
+      "date_result2": "2020-04-22T15:20:45.765+0000"
+    }
+```
+
+### DATE_ADD(date, value, interval)
+
+Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+|--------------|-----------|
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND
+"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return
+    {
+      "date_add_result": 1587655245765
+    }
+```
+
+```
+"SELECT DATE_ADD(CURRENT_TIMESTAMP, 2, 'years')
+AS date_add_result2" returns
+    {
+      "date_add_result2": 1650643129017
+    }
+```
+
+### DATE_DIFF(date_1, date_2[, interval])
+
+Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds.
+
+Accepted interval values:
+* years
+* months
+* weeks
+* days
+* hours
+* minutes
+* seconds
+
+```
+"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours')
+AS date_diff_result" returns
+    {
+      "date_diff_result": -17519.753333333334
+    }
+```
+
+### DATE_FORMAT(date, format)
+
+Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/).
+
+```
+"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss')
+AS date_format_result" returns
+    {
+      "date_format_result": "2018-04-22 15:57:07"
+    }
+```
+
+### DATE_SUB(date, value, interval)
+
+Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+|--------------|-----------|
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns
+    {
+      "date_sub_result": 1524410445765
+    }
+```
+
+### EXTRACT(date, date_part)
+
+Extracts and returns the date_part requested as a String value. The accepted date_part values below show the value returned for date = “2020-03-26T15:13:02.041+0000”.
+
+| date_part | Example return value |
+|-------------|----------------------|
+| year | “2020” |
+| month | “3” |
+| day | “26” |
+| hour | “15” |
+| minute | “13” |
+| second | “2” |
+| millisecond | “41” |
+
+```
+"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns
+    {
+      "extract_result": "2020"
+    }
+```
+
+### GETDATE()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT GETDATE() AS getdate_result" returns
+    {
+      "getdate_result": 1587568845765
+    }
+```
+
+### GET_SERVER_TIME()
+Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+```
+"SELECT GET_SERVER_TIME() AS get_server_time_result" returns
+    {
+      "get_server_time_result": "2020-04-22T15:20:45.765+0000"
+    }
+```
+
+### OFFSET_UTC(date, offset)
+Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless its absolute value is less than 16, in which case it will be treated as hours.
+
+```
+"SELECT OFFSET_UTC(1587568845765, 240) AS offset_utc_result" returns
+    {
+      "offset_utc_result": "2020-04-22T19:20:45.765+0400"
+    }
+```
+
+```
+"SELECT OFFSET_UTC(1587568845765, 10) AS offset_utc_result2" returns
+    {
+      "offset_utc_result2": "2020-04-23T01:20:45.765+1000"
+    }
+```
+
+### NOW()
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT NOW() AS now_result" returns
+    {
+      "now_result": 1587568845765
+    }
+```
+
diff --git a/site/versioned_docs/version-4.1/sql-guide/delete.md b/site/versioned_docs/version-4.1/sql-guide/delete.md
new file mode 100644
index 00000000..6e227192
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/delete.md
@@ -0,0 +1,14 @@
+---
+title: Delete
+---
+
+# Delete
+
+HarperDB supports deleting records from a table, with support for conditions.
+
+```
+DELETE FROM dev.dog
+    WHERE age < 4
+```
diff --git a/site/versioned_docs/version-4.1/sql-guide/features-matrix.md b/site/versioned_docs/version-4.1/sql-guide/features-matrix.md
new file mode 100644
index 00000000..f0ee3072
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/features-matrix.md
@@ -0,0 +1,83 @@
+---
+title: SQL Features Matrix
+---
+
+# SQL Features Matrix
+
+HarperDB provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need. If not, feel free to [add a Feature Request](https://feedback.harperdb.io/).
+
+| INSERT | |
+|------------------------------------|-----|
+| Values - multiple values supported | ✔ |
+| Sub-SELECT | ✗ |
+
+| UPDATE | |
+|-----------------|-----|
+| SET | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
+
+| DELETE | |
+|------------|-----|
+| FROM | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+
+| SELECT | |
+|----------------------|-----|
+| Column SELECT | ✔ |
+| Aliases | ✔ |
+| Aggregator Functions | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
+| Constant Values | ✔ |
+| Distinct | ✔ |
+| Sub-SELECT | ✗ |
+
+| FROM | |
+|------------------|-----|
+| Multi-table JOIN | ✔ |
+| INNER JOIN | ✔ |
+| LEFT OUTER JOIN | ✔ |
+| LEFT INNER JOIN | ✔ |
+| RIGHT OUTER JOIN | ✔ |
+| RIGHT INNER JOIN | ✔ |
+| FULL JOIN | ✔ |
+| UNION | ✗ |
+| Sub-SELECT | ✗ |
+| TOP | ✔ |
+
+| WHERE | |
+|----------------------------|-----|
+| Multi-Conditions | ✔ |
+| Wildcards | ✔ |
+| IN | ✔ |
+| LIKE | ✔ |
+| Bit-wise Operators AND, OR | ✔ |
+| Bit-wise Operators NOT | ✔ |
+| NULL | ✔ |
+| BETWEEN | ✔ |
+| EXISTS, ANY, ALL | ✔ |
+| Compare columns | ✔ |
+| Compare constants | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
+| Sub-SELECT | ✗ |
+
+| GROUP BY | |
+|-----------------------|-----|
+| Multi-Column GROUP BY | ✔ |
+
+| HAVING | |
+|-------------------------------|-----|
+| Aggregate function conditions | ✔ |
+
+| ORDER BY | |
+|-----------------------|-----|
+| Multi-Column ORDER BY | ✔ |
+| Aliases | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/functions.md b/site/versioned_docs/version-4.1/sql-guide/functions.md
new file mode 100644
index 00000000..ccd6f247
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/functions.md
@@ -0,0 +1,153 @@
+---
+title: HarperDB SQL Functions
+---
+
+# HarperDB SQL Functions
+
+This SQL keywords reference contains the SQL functions available in HarperDB.
+
+## Functions
+### Aggregate
+
+| Keyword | Syntax | Description |
+|-----------------|--------|-------------|
+| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
+| COUNT | SELECT COUNT(_column_name_) FROM _schema.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. |
+| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string with the concatenated, comma-separated, non-null values from a group. Returns null when there are no non-null values. |
+| MAX | SELECT MAX(_column_name_) FROM _schema.table_ WHERE _condition_ | Returns the largest value in a specified column. |
+| MIN | SELECT MIN(_column_name_) FROM _schema.table_ WHERE _condition_ | Returns the smallest value in a specified column. |
+| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
+| ARRAY* | ARRAY(_expression_) | Returns a list of data as a field. |
+| DISTINCT_ARRAY* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |
+
+*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).
+
+### Conversion
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
+| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |
+
+### Date & Time
+
+| Keyword | Syntax | Description |
+|-------------------|--------|-------------|
+| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
+| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” string format. |
+| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, see [SQL Date Functions](./date-functions). |
+| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ string format. If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed based on the interval as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_SUB | DATE_SUB(_date, value, interval_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). |
+| DAY | DAY(_date_) | Returns the day of the month for the given date. |
+| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
+| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. Accepted date_part values show the value returned for date = “2020-03-26T15:13:02.041+0000”. For more information, see [SQL Date Functions](./date-functions). |
+| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
+| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
+| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless its absolute value is less than 16, in which case it will be treated as hours. |
+| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
+| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. |
+| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. |
+| MONTH | MONTH(_date_) | Returns the month part for a specified date in range of 1 to 12. |
+| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. |
+| YEAR | YEAR(_date_) | Returns the year part for a specified date. |
+
+### Logical
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
+| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2; if not equal, returns expression_1. |
+
+### Mathematical
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
+| CEIL | CEIL(_number_) | Returns integer ceiling, the smallest integer value that is bigger than or equal to a given number. |
+| EXP | EXP(_number_) | Returns e to the power of a specified number. |
+| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
+| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. |
+| ROUND | ROUND(_number,decimal_places_) | Rounds a given number to a specified number of decimal places. |
+| SQRT | SQRT(_expression_) | Returns the square root of an expression. |
+
+### String
+
+| Keyword | Syntax | Description |
+|-------------|--------|-------------|
+| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
+| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
+| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
+| LEN | LEN(_string_) | Returns the length of a string. |
+| LOWER | LOWER(_string_) | Converts a string to lower-case. |
+| REGEXP | SELECT _column_name_ FROM _schema.table_ WHERE _column_name_ REGEXP _pattern_ | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REGEXP_LIKE | SELECT _column_name_ FROM _schema.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string with new_string. |
+| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified amount of characters from a string. |
+| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
+| UPPER | UPPER(_string_) | Converts a string to upper-case. |
+
+## Operators
+### Logical Operators
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| BETWEEN | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | (inclusive) Returns values (numbers, text, or dates) within a given range. |
+| IN | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
+| LIKE | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |
+
+## Queries
+### General
+
+| Keyword | Syntax | Description |
+|----------|--------|-------------|
+| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _schema.table_ | Returns only unique values, eliminating duplicate records. |
+| FROM | FROM _schema.table_ | Used to list the schema(s), table(s), and any joins required for a SQL statement. |
+| GROUP BY | SELECT _column_name(s)_ FROM _schema.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
+| HAVING | SELECT _column_name(s)_ FROM _schema.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
+| SELECT | SELECT _column_name(s)_ FROM _schema.table_ | Selects data from table. |
+| WHERE | SELECT _column_name(s)_ FROM _schema.table_ WHERE _condition_ | Extracts records based on a defined condition. |
+
+### Joins
+
+| Keyword | Syntax | Description |
+|--------------------|--------|-------------|
+| CROSS JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ CROSS JOIN _schema.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
+| FULL [OUTER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ FULL OUTER JOIN _schema.table_2_ ON _table_1.column_name_ _= table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
+| [INNER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ INNER JOIN _schema.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Returns only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
+| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ LEFT OUTER JOIN _schema.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Returns all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
+| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ RIGHT OUTER JOIN _schema.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |
+
+### Predicates
+
+| Keyword | Syntax | Description |
+|-------------|--------|-------------|
+| IS NOT NULL | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
+| IS NULL | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ IS NULL | Tests for null values. |
+
+### Statements
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| DELETE | DELETE FROM _schema.table_ WHERE _condition_ | Deletes existing data from a table. |
+| INSERT | INSERT INTO _schema.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. |
+| UPDATE | UPDATE _schema.table_ SET _column_1 = value_1, column_2 = value_2, ....,_ WHERE _condition_ | Alters existing records in a table. |
diff --git a/site/versioned_docs/version-4.1/sql-guide/index.md b/site/versioned_docs/version-4.1/sql-guide/index.md
new file mode 100644
index 00000000..a6a44875
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/index.md
@@ -0,0 +1,11 @@
+---
+title: HarperDB SQL Guide
+---
+
+# HarperDB SQL Guide
+
+The purpose of this guide is to describe the available functionality of HarperDB as it relates to supported SQL functionality. The SQL parser is still actively being developed, and this document will be updated as more features and functionality become available. **A high-level view of supported features can be found [here](./features-matrix).**
+
+HarperDB adheres to the concept of schemas & tables.
+This allows developers to isolate table structures from each other, all within one database.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/insert.md b/site/versioned_docs/version-4.1/sql-guide/insert.md
new file mode 100644
index 00000000..a929fe7a
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/insert.md
@@ -0,0 +1,14 @@
+---
+title: Insert
+---
+
+# Insert
+
+HarperDB supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, an auto-generated UUID will be assigned. HarperDB does not support selecting from one table to insert into another at this time.
+
+```
+INSERT INTO dev.dog (id, dog_name, age, breed_id)
+    VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/joins.md b/site/versioned_docs/version-4.1/sql-guide/joins.md
new file mode 100644
index 00000000..8e820a82
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/joins.md
@@ -0,0 +1,25 @@
+---
+title: Joins
+---
+
+# Joins
+
+HarperDB allows developers to join any number of tables and currently supports the following join types:
+
+* INNER JOIN
+* LEFT INNER JOIN
+* LEFT OUTER JOIN
+
+Here’s a basic example joining two tables from our Get Started example – joining a dogs table with a breeds table:
+
+```
+SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
+    FROM dev.dog AS d
+    INNER JOIN dev.breed AS b ON d.breed_id = b.id
+    WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
+    AND b.section = 'Mutt'
+    ORDER BY d.dog_name
+```
diff --git a/site/versioned_docs/version-4.1/sql-guide/json-search.md b/site/versioned_docs/version-4.1/sql-guide/json-search.md
new file mode 100644
index 00000000..3c48c308
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/json-search.md
@@ -0,0 +1,181 @@
+---
+title: SQL JSON Search
+---
+
+# SQL JSON Search
+
+HarperDB automatically indexes all top level attributes in a row / object written to a table. However, an attribute which holds JSON does not have its nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, HarperDB offers a special SQL function called SEARCH_JSON. The SEARCH_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) into our SQL engine.
+
+## Syntax
+
+SEARCH_JSON(*expression, attribute*)
+
+Executes the supplied string _expression_ against the data of the defined top level _attribute_ for each row. The expression both filters and defines output from the JSON document.
+
+### Example 1
+#### Search a string array
+
+Here are two records in the database:
+
+```json
+[
+    {
+        "id": 1,
+        "name": ["Harper", "Penny"]
+    },
+    {
+        "id": 2,
+        "name": ["Penny"]
+    }
+]
+```
+Here is a simple query that gets any record with "Harper" found in the name.
+
+```
+SELECT *
+FROM dev.dog
+WHERE search_json('"Harper" in *', name)
+```
+
+### Example 2
+The purpose of this query is to give us every movie where at least two of our favorite actors from Marvel films have acted together. The results will return the movie title, the overview, release date, and an object array of each actor’s name and their character name in the movie.
+
+Both function calls evaluate the credits.cast attribute; this attribute is an object array of every cast member in a movie.
+
+```
+SELECT m.title,
+       m.overview,
+       m.release_date,
+       SEARCH_JSON($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]].{"actor": name, "character": character}, c.`cast`) AS characters
+FROM movies.credits c
+    INNER JOIN movies.movie m
+        ON c.movie_id = m.id
+WHERE SEARCH_JSON($count($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2
+```
+A sample of this data from the movie The Avengers looks like:
+
+```json
+[
+    {
+        "cast_id": 46,
+        "character": "Tony Stark / Iron Man",
+        "credit_id": "52fe4495c3a368484e02b251",
+        "gender": "male",
+        "id": 3223,
+        "name": "Robert Downey Jr.",
+        "order": 0
+    },
+    {
+        "cast_id": 2,
+        "character": "Steve Rogers / Captain America",
+        "credit_id": "52fe4495c3a368484e02b19b",
+        "gender": "male",
+        "id": 16828,
+        "name": "Chris Evans",
+        "order": 1
+    },
+    {
+        "cast_id": 307,
+        "character": "Bruce Banner / The Hulk",
+        "credit_id": "5e85e8083344c60015411cfa",
+        "gender": "male",
+        "id": 103,
+        "name": "Mark Ruffalo",
+        "order": 2
+    }
+]
+```
+Let’s break down the SEARCH_JSON function call in the SELECT:
+
+```
+SEARCH_JSON(
+    $[name in [
+        "Robert Downey Jr.",
+        "Chris Evans",
+        "Scarlett Johansson",
+        "Mark Ruffalo",
+        "Chris Hemsworth",
+        "Jeremy Renner",
+        "Clark Gregg",
+        "Samuel L. Jackson",
+        "Gwyneth Paltrow",
+        "Don Cheadle"
+    ]].{
+        "actor": name,
+        "character": character
+    },
+    c.`cast`
+)
+```
+The first argument passed to SEARCH_JSON is the expression to execute against the second argument, which is the cast attribute on the credits table. This expression will execute for every row. Looking into the expression, it starts with “$[…]”, which tells the expression to iterate all elements of the cast array.
+
+The expression then tells the function to only return entries where the name attribute matches any of the actors defined in the array:
+
+```
+name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]
+```
+
+So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{“actor”: name, “character”: character}`. This tells the function to create a specific object for each matching entry.
+
+##### Sample Result
+
+```json
+[
+    {
+        "actor": "Robert Downey Jr.",
+        "character": "Tony Stark / Iron Man"
+    },
+    {
+        "actor": "Chris Evans",
+        "character": "Steve Rogers / Captain America"
+    },
+    {
+        "actor": "Mark Ruffalo",
+        "character": "Bruce Banner / The Hulk"
+    }
+]
+```
+
+Just having the SEARCH_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors. In order to filter out the movies we do not want, we also use SEARCH_JSON in the WHERE clause.
+
+This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT:
+
+```
+SEARCH_JSON(
+    $count(
+        $[name in [
+            "Robert Downey Jr.",
+            "Chris Evans",
+            "Scarlett Johansson",
+            "Mark Ruffalo",
+            "Chris Hemsworth",
+            "Jeremy Renner",
+            "Clark Gregg",
Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]] + ), + c.`cast` +) >= 2 +``` + +As seen above we execute the same name filter against the cast array, the primary difference is we are wrapping the filtered results in $count(…). As it looks this returns a count of the results back which we then use against our SQL comparator of >= 2. + + + +To see further SEARCH_JSON examples in action view our Postman Collection that provides a sample schema & data with query examples: https:/api.harperdb.io/ + + + +To learn more about how to build expressions check out the JSONata documentation: http:/docs.jsonata.org/overview \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/sql-guide/reserved-word.md b/site/versioned_docs/version-4.1/sql-guide/reserved-word.md new file mode 100644 index 00000000..bcefa00a --- /dev/null +++ b/site/versioned_docs/version-4.1/sql-guide/reserved-word.md @@ -0,0 +1,203 @@ +--- +title: HarperDB SQL Reserved Words +--- + +# HarperDB SQL Reserved Words + +This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a schema, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable. + +For Example, for a table called ASSERT in the dev schema, a SQL select on that table would look like: + +``` +SELECT * from dev.`ASSERT` +``` + +Alternatively: + +``` +SELECT * from dev.[ASSERT] +``` + +### RESERVED WORD LIST + +* ABSOLUTE +* ACTION +* ADD +* AGGR +* ALL +* ALTER +* AND +* ANTI +* ANY +* APPLY +* ARRAY +* AS +* ASSERT +* ASC +* ATTACH +* AUTOINCREMENT +* AUTO_INCREMENT +* AVG +* BEGIN +* BETWEEN +* BREAK +* BY +* CALL +* CASE +* CAST +* CHECK +* CLASS +* CLOSE +* COLLATE +* COLUMN +* COLUMNS +* COMMIT +* CONSTRAINT +* CONTENT +* CONTINUE +* CONVERT +* CORRESPONDING +* COUNT +* CREATE +* CROSS +* CUBE +* CURRENT_TIMESTAMP +* CURSOR +* DATABASE +* DECLARE +* DEFAULT +* DELETE +* DELETED +* DESC +* DETACH +* DISTINCT +* DOUBLEPRECISION +* DROP +* ECHO +* EDGE +* END +* ENUM +* ELSE +* EXCEPT +* EXISTS +* EXPLAIN +* FALSE +* FETCH +* FIRST +* FOREIGN +* FROM +* GO +* GRAPH +* GROUP +* GROUPING +* HAVING +* HDB_HASH +* HELP +* IF +* IDENTITY +* IS +* IN +* INDEX +* INNER +* INSERT +* INSERTED +* INTERSECT +* INTO +* JOIN +* KEY +* LAST +* LET +* LEFT +* LIKE +* LIMIT +* LOOP +* MATCHED +* MATRIX +* MAX +* MERGE +* MIN +* MINUS +* MODIFY +* NATURAL +* NEXT +* NEW +* NOCASE +* NO +* NOT +* NULL +* OFF +* ON +* ONLY +* OFFSET +* OPEN +* OPTION +* OR +* ORDER +* OUTER +* OVER +* PATH +* PARTITION +* PERCENT +* PLAN +* PRIMARY +* PRINT +* PRIOR +* QUERY +* READ +* RECORDSET +* REDUCE +* REFERENCES +* RELATIVE +* REPLACE +* REMOVE +* RENAME +* REQUIRE +* RESTORE +* RETURN +* RETURNS +* RIGHT +* ROLLBACK +* ROLLUP +* ROW +* SCHEMA +* SCHEMAS +* SEARCH +* SELECT +* SEMI +* SET +* SETS +* SHOW +* SOME +* SOURCE +* STRATEGY +* STORE +* SYSTEM +* SUM +* TABLE +* TABLES +* TARGET +* TEMP +* TEMPORARY +* TEXTSTRING +* THEN +* TIMEOUT +* TO +* TOP +* TRAN +* TRANSACTION +* TRIGGER +* TRUE +* TRUNCATE +* UNION +* UNIQUE +* UPDATE +* USE +* USING +* VALUE +* VERTEX +* VIEW +* WHEN +* WHERE +* WHILE +* WITH +* WORK diff --git a/site/versioned_docs/version-4.1/sql-guide/select.md b/site/versioned_docs/version-4.1/sql-guide/select.md new file mode 100644 index 00000000..e2896029 --- /dev/null +++ b/site/versioned_docs/version-4.1/sql-guide/select.md @@ -0,0 +1,27 @@ +--- +title: Select +--- + +# Select + 
+HarperDB has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping, & ordering.
+
+All results are returned as JSON object arrays.
+
+Query for all records and attributes in the dev.dog table:
+```
+SELECT * FROM dev.dog
+```
+Query specific columns from all rows in the dev.dog table:
+```
+SELECT id, dog_name, age FROM dev.dog
+```
+Query for all records and attributes in the dev.dog table, ordered by age in ascending order:
+```
+SELECT * FROM dev.dog ORDER BY age
+```
+*The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword.*
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoarea.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoarea.md
new file mode 100644
index 00000000..d95a0237
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoarea.md
@@ -0,0 +1,41 @@
+---
+title: geoArea
+---
+
+# geoArea
+
+The geoArea() function returns the area of one or more features in square meters.
+
+### Syntax
+geoArea(_geoJSON_)
+
+### Parameters
+| Parameter | Description |
+|-----------|---------------------------------|
+| geoJSON | Required. One or more features. |
+
+#### Example 1
+Calculate the area, in square meters, of a manually passed GeoJSON polygon.
+
+```
+SELECT geoArea('{
+    "type":"Feature",
+    "geometry":{
+        "type":"Polygon",
+        "coordinates":[[
+            [0,0],
+            [0.123456,0],
+            [0.123456,0.123456],
+            [0,0.123456]
+        ]]
+    }
+}')
+```
+
+#### Example 2
+Find all records that have an area less than 1 square mile (or 2589988 square meters).
+
+```
+SELECT * FROM dev.locations
+WHERE geoArea(geo_data) < 2589988
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geocontains.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geocontains.md
new file mode 100644
index 00000000..8a562e13
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geocontains.md
@@ -0,0 +1,65 @@
+---
+title: geoContains
+---
+
+# geoContains
+Determines if geo2 is completely contained by geo1. Returns a Boolean.
+
+## Syntax
+geoContains(_geo1, geo2_)
+
+## Parameters
+| Parameter | Description |
+|-----------|------------------------------------------------------------------------------------|
+| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. |
+
+### Example 1
+Return all locations within the state of Colorado (passed as a GeoJSON string).
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains('{
+    "type": "Feature",
+    "properties": {
+        "name":"Colorado"
+    },
+    "geometry": {
+        "type": "Polygon",
+        "coordinates": [[
+            [-109.072265625,37.00255267],
+            [-102.01904296874999,37.00255267],
+            [-102.01904296874999,41.01306579],
+            [-109.072265625,41.01306579],
+            [-109.072265625,37.00255267]
+        ]]
+    }
+}', geo_data)
+```
+
+### Example 2
+Return all locations which contain HarperDB Headquarters.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains(geo_data, '{
+    "type": "Feature",
+    "properties": {
+        "name": "HarperDB Headquarters"
+    },
+    "geometry": {
+        "type": "Polygon",
+        "coordinates": [[
+            [-104.98060941696167,39.760704817357905],
+            [-104.98053967952728,39.76065120861263],
+            [-104.98055577278137,39.760642961109674],
+            [-104.98037070035934,39.76049450588716],
+            [-104.9802714586258,39.76056254790385],
+            [-104.9805235862732,39.76076461167841],
+            [-104.98060941696167,39.760704817357905]
+        ]]
+    }
+}')
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoconvert.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoconvert.md
new file mode 100644
index 00000000..44dba079
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoconvert.md
@@ -0,0 +1,30 @@
+---
+title: geoConvert
+---
+
+# geoConvert
+
+Converts a series of coordinates into a GeoJSON of the specified type.
+
+## Syntax
+geoConvert(_coordinates, geo_type_[, _properties_])
+
+## Parameters
+| Parameter | Description |
+|-------------|------------------------------------------------------------------------------------------------------------------------------------|
+| coordinates | Required. One or more coordinates. |
+| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’. |
+| properties | Optional. Escaped JSON object with properties to be added to the GeoJSON output. |
+
+### Example
+Convert a given coordinate into a GeoJSON point with specified properties.
+
+```
+SELECT geoConvert(
+    '[-104.979127,39.761563]',
+    'point',
+    '{
+        "name": "HarperDB Headquarters"
+    }'
+)
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geocrosses.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geocrosses.md
new file mode 100644
index 00000000..dea03037
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geocrosses.md
@@ -0,0 +1,44 @@
+---
+title: geoCrosses
+---
+
+# geoCrosses
+Determines if the geometries cross over each other. Returns a Boolean.
+
+## Syntax
+geoCrosses(_geo1, geo2_)
+
+## Parameters
+| Parameter | Description |
+|-----------|-----------------------------------------|
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+Find all locations that cross over a highway.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoCrosses(
+    geo_data,
+    '{
+        "type": "Feature",
+        "properties": {
+            "name": "Highway I-25"
+        },
+        "geometry": {
+            "type": "LineString",
+            "coordinates": [
+                [-104.9139404296875,41.00477542222947],
+                [-105.0238037109375,39.715638134796336],
+                [-104.853515625,39.53370327008705],
+                [-104.853515625,38.81403111409755],
+                [-104.61181640625,38.39764411353178],
+                [-104.8974609375,37.68382032669382],
+                [-104.501953125,37.00255267215955]
+            ]
+        }
+    }'
+)
```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geodifference.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geodifference.md
new file mode 100644
index 00000000..652dbc1a
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geodifference.md
@@ -0,0 +1,56 @@
+---
+title: geoDifference
+---
+
+# geoDifference
+Returns a new polygon with the difference of the second polygon clipped from the first polygon.
+
+## Syntax
+geoDifference(_polygon1, polygon2_)
+
+## Parameters
+| Parameter | Description |
+|-----------|-----------------------------------------------------------------------------|
+| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| polygon2 | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1. |
+
+### Example
+Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_).
+
+```
+SELECT geoDifference('{
+        "type": "Feature",
+        "properties": {
+            "name":"Colorado"
+        },
+        "geometry": {
+            "type": "Polygon",
+            "coordinates": [[
+                [-109.072265625,37.00255267215955],
+                [-102.01904296874999,37.00255267215955],
+                [-102.01904296874999,41.0130657870063],
+                [-109.072265625,41.0130657870063],
+                [-109.072265625,37.00255267215955]
+            ]]
+        }
+    }',
+    '{
+        "type": "Feature",
+        "properties": {
+            "name":"City Park"
+        },
+        "geometry": {
+            "type": "Polygon",
+            "coordinates": [[
+                [-104.95973110198975,39.7543828214657],
+                [-104.95955944061278,39.744781185675386],
+                [-104.95904445648193,39.74422022399989],
+                [-104.95835781097412,39.74402223643582],
+                [-104.94097709655762,39.74392324244047],
+                [-104.9408483505249,39.75434982844515],
+                [-104.95973110198975,39.7543828214657]
+            ]]
+        }
+    }'
+)
+```
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geodistance.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geodistance.md
new file mode 100644
index 00000000..ab7c9a53
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geodistance.md
@@ -0,0 +1,33 @@
+---
+title: geoDistance
+---
+
+# geoDistance
+Calculates the distance between two points in units (default is kilometers).
+
+## Syntax
+geoDistance(_point1, point2_[_, units_])
+
+## Parameters
+| Parameter | Description |
+|-----------|-------------------------------------------------------------------------------------------------------------------------|
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+Calculate the distance, in miles, between HarperDB’s headquarters and the Washington Monument.
+
+```
+SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles')
+```
+
+### Example 2
+Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in ascending order.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoDistance('[-104.979127,39.761563]', geo_data, 'kilometers') < 40
+ORDER BY distance ASC
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoequal.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoequal.md
new file mode 100644
index 00000000..6c665e06
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geoequal.md
@@ -0,0 +1,41 @@
+---
+title: geoEqual
+---
+
+# geoEqual
+Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean.
+
+## Syntax
+geoEqual(_geo1_, _geo2_)
+
+## Parameters
+| Parameter | Description |
+|-----------|-----------------------------------------|
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+Find HarperDB Headquarters among all locations within the database.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoEqual(geo_data, '{
+    "type": "Feature",
+    "properties": {
+        "name": "HarperDB Headquarters"
+    },
+    "geometry": {
+        "type": "Polygon",
+        "coordinates": [[
+            [-104.98060941696167,39.760704817357905],
+            [-104.98053967952728,39.76065120861263],
+            [-104.98055577278137,39.760642961109674],
+            [-104.98037070035934,39.76049450588716],
+            [-104.9802714586258,39.76056254790385],
+            [-104.9805235862732,39.76076461167841],
+            [-104.98060941696167,39.760704817357905]
+        ]]
+    }
+}')
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geolength.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geolength.md
new file mode 100644
index 00000000..6b00cadd
--- /dev/null
+++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geolength.md
@@ -0,0 +1,42 @@
+---
+title: geoLength
+---
+
+# geoLength
+Takes a GeoJSON and measures its length in the specified units (default is kilometers).
+
+## Syntax
+geoLength(_geoJSON_[_, units_])
+
+## Parameters
+| Parameter | Description |
+|-----------|-------------------------------------------------------------------------------------------------------------------------|
+| geoJSON | Required. GeoJSON to measure. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+Calculate the length, in kilometers, of a manually passed GeoJSON linestring.
+
+```
+SELECT geoLength('{
+    "type": "Feature",
+    "geometry": {
+        "type": "LineString",
+        "coordinates": [
+            [-104.97963309288025,39.76163265441438],
+            [-104.9823260307312,39.76365323407955],
+            [-104.99193906784058,39.75616442110704]
+        ]
+    }
+}')
+```
+
+### Example 2
+Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of length from smallest to largest.
+ +``` +SELECT *, geoLength(geo_data, 'miles') as length +FROM dev.locations +WHERE geoLength(geo_data, 'miles') < 5 +ORDER BY length ASC +``` diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geonear.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geonear.md new file mode 100644 index 00000000..32028ed4 --- /dev/null +++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/geonear.md @@ -0,0 +1,36 @@ +--- +title: geoNear +--- + +# geoNear +Determines if point1 and point2 are within a specified distance of each other; default units are kilometers. Returns a Boolean. + +## Syntax +geoNear(_point1, point2, distance_[_, units_]) + +## Parameters +| Parameter | Description | +|------------|-----------------------------------------------------------------------------------------------------------------------| +| point1 | Required. GeoJSON Point specifying the origin. | +| point2 | Required. GeoJSON Point specifying the destination. | +| distance | Required. The maximum distance in units as an integer or decimal. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | + +### Example 1 +Return all locations within 50 miles of a given point. + +``` +SELECT * +FROM dev.locations +WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles') +``` + +### Example 2 +Return all locations within 2 degrees of the earth of a given point. (Each degree lat/long is about 69 miles [111 kilometers].) Return all data and the distance in miles, sorted by ascending distance. + +``` +SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance +FROM dev.locations +WHERE geoNear('[-104.979127,39.761563]', geo_data, 2, 'degrees') +ORDER BY distance ASC +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/index.md b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/index.md new file mode 100644 index 00000000..e692e812 --- /dev/null +++ b/site/versioned_docs/version-4.1/sql-guide/sql-geospatial-functions/index.md @@ -0,0 +1,20 @@ +--- +title: SQL Geospatial Functions +--- + +# SQL Geospatial Functions + +HarperDB geospatial features require data to be stored in a single column using the [GeoJSON standard](http://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements. + +If you are new to GeoJSON you should check out the full specification here: http://geojson.org/. There are a few important things to point out before getting started. + +1) All GeoJSON coordinates are stored in `[longitude, latitude]` format. +2) Coordinates or GeoJSON geometries must be passed as a string when written directly in a SQL statement. +3) If you are using Postman for your testing, note that due to limitations in the Postman client you will need to escape quotes in your strings, and your SQL will need to be passed on a single line. + +In the examples contained in the left-hand navigation, schema and table names may change, but all GeoJSON data will be stored in a column named geo_data.
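+ +To make this concrete, below is a rough sketch of what a stored record might look like with a GeoJSON Point in the geo_data column. The table shape and the other attributes are illustrative assumptions, not part of the GeoJSON specification; note the `[longitude, latitude]` ordering. + +```json +{ + "id": 1, + "name": "HarperDB Headquarters", + "geo_data": { + "type": "Feature", + "properties": { "name": "HarperDB Headquarters" }, + "geometry": { + "type": "Point", + "coordinates": [-104.979127, 39.761563] + } + } +} +```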
diff --git a/site/versioned_docs/version-4.1/sql-guide/update.md b/site/versioned_docs/version-4.1/sql-guide/update.md new file mode 100644 index 00000000..a3c838e8 --- /dev/null +++ b/site/versioned_docs/version-4.1/sql-guide/update.md @@ -0,0 +1,15 @@ +--- +title: Update +--- + +# Update + +HarperDB supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time, selecting from one table to update another is not supported. + + + +``` +UPDATE dev.dog + SET owner_name = 'Kyle' + WHERE id IN (1, 2) +``` diff --git a/site/versioned_docs/version-4.1/support.md b/site/versioned_docs/version-4.1/support.md new file mode 100644 index 00000000..b89a8e5f --- /dev/null +++ b/site/versioned_docs/version-4.1/support.md @@ -0,0 +1,84 @@ +--- +title: Support +--- + +# Support + +HarperDB support is available with all paid instances. Support tickets are managed via our [Zendesk portal](https://harperdbhelp.zendesk.com/hc/en-us/requests/new). Once a ticket is submitted, the HarperDB team will triage your request and get back to you as soon as possible. Additionally, you can join our [Slack community](https://harperdbcommunity.slack.com/join/shared\_invite/zt-e8w6u1pu-2UFAXl\_f4ZHo7F7DVkHIDA#/) where HarperDB team members and others in the community are frequently active to help answer questions. + +* [Submit a Support Ticket](https://harperdbhelp.zendesk.com/hc/en-us/requests/new) +* [Join Our Slack Community](https://harperdbcommunity.slack.com/join/shared\_invite/zt-e8w6u1pu-2UFAXl\_f4ZHo7F7DVkHIDA#/) + +*** + +### Common Issues + +**1 Gigabyte Limit to Request Bodies** + +HarperDB supports request bodies up to 1 GB in size. This limit does not impact the CSV file import function that reads from the local file system or from an external URL. If you need to bulk import large record sets, we recommend utilizing the CSV import function, especially if you run up against the 1 GB body size limit. Documentation for these functions can be found here. + +**Do not install as sudo** + +HarperDB should be installed using a specific user for HarperDB. This allows you to restrict the permissions that user has and who has access to the HarperDB file system. The reason behind this is that HarperDB files are written directly to the file system, and using a specific HarperDB user gives you granular control over who has access to these files. + +**Error: Must execute as User** + +You may have gotten an error like, `Error: Must execute as <>.` This means that you installed HarperDB as `<>`. Because HarperDB stores files directly to the file system, we only allow the HarperDB executable to be run by a single user. This prevents permissions issues on files. For example, if you installed as user\_a but later wanted to run as user\_b, user\_b may not have access to the database files HarperDB needs. This also keeps HarperDB more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files. + +*** + +### Frequently Asked Questions (FAQs) + +**What operating system should I use to run HarperDB?** + +All major operating systems: Linux, Windows, and macOS. However, running HarperDB on Windows and macOS is intended only for development and evaluation purposes. Linux is strongly recommended for production use.
+ +**How are HarperDB’s SQL and NoSQL capabilities different from other solutions?** + +Many solutions offer NoSQL capability and separate processing for SQL, such as in-memory transformation or multi-model support. HarperDB’s unique mechanism for storing each data attribute individually allows for performing NoSQL and SQL operations in real time on the stored data set. + +**How does HarperDB ensure high availability and consistency?** + +HarperDB's clustering and replication capabilities allow high availability and fault tolerance; if a server goes down, traffic can be quickly routed to other HarperDB servers that can service requests. HarperDB's replication uses a consistent resolution strategy (last-write-wins by logical timestamp) to ensure eventual consistency. HarperDB offers auditing capabilities that can be enabled to preserve a record of all changes, so that mistakes or even malicious data changes are recorded and can be reverted. + +**Is HarperDB ACID-compliant?** + +HarperDB operations are atomic, consistent, and isolated per instance. This means that any query will provide an isolated, consistent snapshot view of the database (based on when the query started). Update and insert operations are also performed atomically; any reads and writes are performed within an atomic, isolated transaction with a serializable isolation level, and will roll back if they cannot be fully completed successfully. Data is immediately flushed to disk after a write to ensure eventual durability. ACID compliance is not guaranteed across instances in a cluster; rather, eventual consistency will propagate changes with last-write-wins (by latest logical timestamp) resolution. + +**How Does HarperDB Secure My Data?** + +HarperDB has role- and user-based security, allowing you to simply and easily ensure that the right people have access to your data. We also implement a number of authentication mechanisms to ensure the transactions submitted are trusted and secure. + +**Is HarperDB row or column oriented?** + +HarperDB can be considered column-oriented; however, the exploded data model creates an interface that is free from either of these orientations. A user can search and update with columnar benefits while retaining the ACID behavior associated with row-oriented restrictions. + +**What do you mean when you say HarperDB is single model?** + +HarperDB takes every attribute of a database table object and creates a key:value for both the key and its corresponding value. For example, the attribute eye color will be represented by a key “eye-color” and the corresponding value “green” will be represented by a key with the value “green”. We use LMDB’s lightning-fast key:value store to underpin all these interrelated keys and values, meaning that every “column” is automatically indexed, and you get huge performance in a tiny package. + +**Are Primary Keys Case-Sensitive?** + +When using HarperDB, primary keys are case-sensitive. This can cause confusion for developers. For example, if you have a user table, it might make sense to use `user.email` as the primary key. This can cause problems, as Harper@harperdb.io and harper@harperdb.io would be seen as two different records. We recommend enforcing case on keys within your app to avoid this issue. + +**How Do I Move My HarperDB Data Directory?** + +HarperDB’s data directory can be moved from one location to another by simply updating the `rootPath` in the config file (where the data lives, which you specified during installation) to a new location.
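+ +As a sketch, assuming a standard YAML `harperdb-config.yaml`, the updated entry would look something like the following; the path shown is purely illustrative. + +```yaml +rootPath: /new/path/hdb +```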
+ +Next, edit HarperDB’s hdb\_boot\_properties.file to point HarperDB to the new location by updating the settings\_path variable. Substitute the NEW\_HDB\_ROOT variable in the snippets below with the path to your new data directory, making sure you escape any slashes. + +On macOS + +```bash +sed -i '' -E 's/^(settings_path[[:blank:]]*=[[:blank:]]*).*/\1NEW_HDB_ROOT\/harperdb-config.yaml/' ~/.harperdb/hdb_boot_properties.file +``` + +On Linux + +```bash +sed -i -E 's/^(settings_path[[:blank:]]*=[[:blank:]]*).*/\1NEW_HDB_ROOT\/harperdb-config.yaml/' ~/hdb_boot_properties.file +``` + +Finally, edit the config file in the root folder you just moved: + +* Edit the `rootPath` parameter to reflect the new location of your data directory. diff --git a/site/versioned_docs/version-4.1/transaction-logging.md b/site/versioned_docs/version-4.1/transaction-logging.md new file mode 100644 index 00000000..06c6cb38 --- /dev/null +++ b/site/versioned_docs/version-4.1/transaction-logging.md @@ -0,0 +1,89 @@ +--- +title: Transaction Logging +--- + +# Transaction Logging + +HarperDB offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. + +## Transaction log + +The first option is `read_transaction_log`. The transaction log is built upon clustering streams. Clustering streams are per-table message stores that enable data to be propagated across a cluster. HarperDB leverages streams for use with the transaction log. When clustering is enabled, all transactions that occur against a table are pushed to its stream, and thus make up the transaction log. + +If you would like to use the transaction log, but have not set up clustering yet, please see ["How to Cluster"](./clustering/). + + +## Transaction Log Operations + +### read_transaction_log + +The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided. + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 +} +``` + +_See example response below._ + +### read_transaction_log Response + + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +_See example request above._ + +### delete_transaction_logs_before + +The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +_Note: Streams are used for catchup if a node goes down. If you delete messages from a stream there is a chance catchup won't work._ + +Read on for `read_audit_log`, the second option for logging transactions executed against a table.
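+ +As a closing illustration, the operations above can be submitted with any HTTP client. This is a hedged sketch assuming the Operations API is listening on port 9925 with Basic auth; the host and credentials are placeholders. + +```bash +# Request the two most recent transaction log entries for dev.dog +curl -X POST https://localhost:9925 -H 'Content-Type: application/json' -u 'HDB_ADMIN:password' -d '{"operation": "read_transaction_log", "schema": "dev", "table": "dog", "limit": 2}' +```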
\ No newline at end of file diff --git a/site/versioned_docs/version-4.1/upgrade-hdb-instance.md b/site/versioned_docs/version-4.1/upgrade-hdb-instance.md new file mode 100644 index 00000000..af7ba7b1 --- /dev/null +++ b/site/versioned_docs/version-4.1/upgrade-hdb-instance.md @@ -0,0 +1,90 @@ +--- +title: Upgrade a HarperDB Instance +--- + +# Upgrade a HarperDB Instance + +This document describes best practices for upgrading self-hosted HarperDB instances. HarperDB can be upgraded using a combination of npm and built-in HarperDB upgrade scripts. Whenever upgrading your HarperDB installation, it is recommended that you make a backup of your data first. Note: This document applies to self-hosted HarperDB instances only. All HarperDB Cloud instances will be upgraded by the HarperDB Cloud team. + +## Upgrading + +Upgrading HarperDB is a two-step process. First, the latest version of HarperDB must be downloaded from npm; then the HarperDB upgrade scripts will be utilized to ensure the newest features are available on the system. + +1. Install the latest version of HarperDB using `npm install -g harperdb`. + + Note `-g` should only be used if you installed HarperDB globally (which is recommended). +1. Run `harperdb` to initiate the upgrade process. + + HarperDB will then prompt you for all appropriate inputs and then run the upgrade directives. + +## Node Version Manager (nvm) + +[Node Version Manager (nvm)](http://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found here: https://nvm.sh/. + +HarperDB supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command: + +```bash +nvm install <version> +``` + +To switch to a version of Node run: + +```bash +nvm use <version> +``` + +To see the current running version of Node run: + +```bash +node --version +``` + +With a handful of different versions of Node.js installed, run nvm with the `ls` argument to list out all installed versions: + +```bash +nvm ls +``` + +When upgrading HarperDB, we recommend also upgrading your Node version. Here we assume you're running on an older version of Node; the execution may look like this: + +Switch to the older version of Node that HarperDB is running on (if it is not the current version): + +```bash +nvm use 14.19.0 +``` + +Make sure HarperDB is not running: + +```bash +harperdb stop +``` + +Uninstall HarperDB. Note, this step is not required, but will clean up old artifacts of HarperDB. We recommend removing all other HarperDB installations to ensure the most recent version is always running.
+ +```bash +npm uninstall -g harperdb +``` + +Switch to the newer version of Node: + +```bash +nvm use <version> +``` + +Install HarperDB globally: + +```bash +npm install -g harperdb +``` + +Run the upgrade script: + +```bash +harperdb +``` + +Start HarperDB: + +```bash +harperdb start +``` diff --git a/site/versioned_docs/version-4.2/administration/_category_.json b/site/versioned_docs/version-4.2/administration/_category_.json new file mode 100644 index 00000000..828e0998 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Administration", + "position": 2, + "link": { + "type": "generated-index", + "title": "Administration Documentation", + "description": "Guides for managing and administering HarperDB instances", + "keywords": [ + "administration" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/administration.md b/site/versioned_docs/version-4.2/administration/administration.md new file mode 100644 index 00000000..7478f12f --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/administration.md @@ -0,0 +1,23 @@ +--- +title: Best Practices and Recommendations +--- + +# Best Practices and Recommendations + +HarperDB is designed for minimal administrative effort, and with managed services these tasks are handled for you. But there are important things to consider when managing your own HarperDB servers. + +### Data Protection and (Backup and) Recovery + +As a distributed database, HarperDB can benefit from different data protection and recovery strategies than a traditional single-server database. Multiple aspects of data protection and recovery should be considered: + +* Availability: As a distributed database, HarperDB is intrinsically built for high availability, and a cluster will continue to run even if entire servers fail. This is the first and primary defense for protecting against any downtime or data loss. HarperDB provides fast horizontal scaling functionality with node cloning, which makes it easy to establish high-availability clusters. +* [Audit log](./logging/audit-logging): HarperDB defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. +* Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). HarperDB has a [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. HarperDB can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss). + +### Horizontal Scaling with Node Cloning + +HarperDB provides rapid horizontal scaling capabilities through [node cloning functionality described here](./cloning). + +### Replication Transaction Logging + +HarperDB utilizes NATS for replication, which maintains a transaction log.
See the [transaction log documentation for information on how to query this log](./logging/transaction-logging). diff --git a/site/versioned_docs/version-4.2/administration/cloning.md b/site/versioned_docs/version-4.2/administration/cloning.md new file mode 100644 index 00000000..32f73933 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/cloning.md @@ -0,0 +1,153 @@ +--- +title: Clone Node +--- + +# Clone Node + +Clone node is a configurable node script that can be pointed to another instance of HarperDB and create a full clone. + +To start clone node, install `harperdb` as you would normally, but with the clone node environment or command line (CLI) variables set (see below). + +To run clone node, either of the following sets of variables must be set: + +#### Environment variables + +* `HDB_LEADER_URL` - The URL of the leader node's operations API (usually port 9925). +* `HDB_LEADER_USERNAME` - The leader node admin username. +* `HDB_LEADER_PASSWORD` - The leader node admin password. +* `HDB_LEADER_CLUSTERING_HOST` - _(optional)_ The leader clustering host. This value will be added to the clustering routes on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: +``` +HDB_LEADER_URL=https://node-1.my-domain.com:9925 HDB_LEADER_CLUSTERING_HOST=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb +``` + +#### Command line variables + +* `--HDB_LEADER_URL` - The URL of the leader node's operations API (usually port 9925). +* `--HDB_LEADER_USERNAME` - The leader node admin username. +* `--HDB_LEADER_PASSWORD` - The leader node admin password. +* `--HDB_LEADER_CLUSTERING_HOST` - _(optional)_ The leader clustering host. This value will be added to the clustering routes on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: +``` +harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --HDB_LEADER_CLUSTERING_HOST node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ... +``` + +If an instance already exists in the location you are cloning to, clone node will not run; it will instead proceed with starting HarperDB. +The exception is when you are cloning overtop of an existing instance (see below). + +Clone node does not require any additional configuration apart from the variables referenced above. +However, it can be configured through `clone-node-config.yaml`, which should be located in the `ROOTPATH` directory of your clone. +If no configuration is supplied, default values will be used. + +By default: +* The HarperDB Terms and Conditions will be accepted +* The root path will be `~/hdb` +* The Operations API port will be set to 9925 +* The admin and clustering username and password will be the same as the leader node +* A unique node name will be generated +* All tables will be cloned and have replication added; the subscriptions will be `publish: true` and `subscribe: true` +* The users and roles system tables will be cloned and have replication added both ways +* All components will be cloned +* All routes will be cloned + +**Leader node** - the instance of HarperDB you are cloning.\ +**Clone node** - the new node which will be a clone of the leader node. + +The following configuration is used exclusively by clone node. + +```yaml +databaseConfig: + excludeDatabases: + - database: dev + excludeTables: + - database: prod + table: dog +``` + +Set any databases or tables that you wish to exclude from cloning.
+ +```yaml +componentConfig: + skipNodeModules: true + exclude: + - name: my-cool-component +``` + +When `skipNodeModules` is set, clone node will not include the node\_modules directory when packaging components in `hdb/components`. + +`exclude` can be used to set any components that you do not want cloned. + +```yaml +clusteringConfig: + publishToLeaderNode: true + subscribeToLeaderNode: true +``` + +`publishToLeaderNode` and `subscribeToLeaderNode` control the clustering subscription to set up with the leader node. + +```yaml +httpsRejectUnauthorized: false +``` + +Clone node makes HTTP requests to the leader node; `httpsRejectUnauthorized` sets whether HTTPS requests should be verified. + +Any HarperDB configuration can also be used in the `clone-node-config.yaml` file and will be applied to the cloned node, for example: + +```yaml +rootPath: null +operationsApi: + network: + port: 9925 +clustering: + nodeName: null + logLevel: info +logging: + level: error +``` + +_Note: any required configuration needed to install/run HarperDB will be default values or auto-generated unless it is provided in the config file._ + +### Fully connected clone + +A fully connected topology is when all nodes are replicating (publishing and subscribing) with all other nodes. A fully connected clone maintains this topology with the addition of the new node. When a clone is created, replication is added between the leader and the clone and any nodes the leader is replicating with. For example, if the leader is replicating with node-a and node-b, the clone will replicate with the leader, node-a and node-b. + +To run clone node with the fully connected option, simply pass the environment variable `HDB_FULLY_CONNECTED=true` or CLI variable `--HDB_FULLY_CONNECTED true`. + +### Cloning overtop of an existing HarperDB instance + +_Note: this will completely overwrite any system tables (user, roles, nodes, etc.) and any other databases that are named the same as ones that exist on the leader node. It will also do the same for any components._ + +To create a clone over an existing install of HarperDB, use the environment variable `HDB_CLONE_OVERTOP=true` or CLI variable `--HDB_CLONE_OVERTOP true`. + +## Cloning steps + +When run, clone node will execute the following steps: + +1. Clone any user defined tables and the hdb\_role and hdb\_user system tables. +1. Install HarperDB overtop of the cloned tables. +1. Clone the configuration, which includes: + * Copy the clustering routes and clustering user. + * Copy component references. + * Use any provided clone config to populate the new clone node's harperdb-config.yaml. +1. Clone any components in the `hdb/components` directory. +1. Start the cloned HarperDB instance. +1. Cluster all cloned tables. + +## Custom database and table pathing + +Currently, clone node will not clone a table if it has custom pathing configured. In this situation the full database that the table is located in will not be cloned. + +If a database has custom pathing (no individual table pathing) it will be cloned; however, if no custom pathing is provided in the clone config, the database will be stored in the default database directory. + +To provide custom pathing for a database in the clone config, follow this configuration: + +```yaml +databases: + <database_name>: + path: /Users/harper/hdb +``` + +`<database_name>` - the name of the database which will be located at the custom path.\ +`path` - the path where the database will reside.
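+ +Putting the options above together, a complete `clone-node-config.yaml` might look like the following sketch. Every key is drawn from the options documented above; the component name, databases, and values are illustrative. + +```yaml +# Exclude a scratch database and one production table from the clone +databaseConfig: + excludeDatabases: + - database: dev + excludeTables: + - database: prod + table: dog +# Skip node_modules and leave one component out when packaging +componentConfig: + skipNodeModules: true + exclude: + - name: my-cool-component +# Replicate in both directions with the leader node +clusteringConfig: + publishToLeaderNode: true + subscribeToLeaderNode: true +# Do not verify the leader's TLS certificate (e.g. self-signed certs) +httpsRejectUnauthorized: false +# Standard HarperDB configuration applied to the clone +operationsApi: + network: + port: 9925 +```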
diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/create-account.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/create-account.md new file mode 100644 index 00000000..635de7f4 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/create-account.md @@ -0,0 +1,26 @@ +--- +title: Create a Studio Account +--- + +# Create a Studio Account +Start at the [HarperDB Studio sign up page](https://studio.harperdb.io/sign-up). + +1) Provide the following information: + * First Name + * Last Name + * Email Address + * Subdomain + + *Part of the URL that will be used to identify your HarperDB Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* + * Coupon Code (optional) +2) Review the Privacy Policy and Terms of Service. +3) Click the **sign up for free** button. +4) You will be taken to a new screen to add an account password. Enter your password. + *Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* +5) Click the **add account password** button. + +You will receive a Studio welcome email confirming your registration. + + + +Note: Your email address will be used as your username and cannot be changed. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/enable-mixed-content.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/enable-mixed-content.md new file mode 100644 index 00000000..1948d6be --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/enable-mixed-content.md @@ -0,0 +1,11 @@ +--- +title: Enable Mixed Content +--- + +# Enable Mixed Content + +Enabling mixed content is required in cases where you would like to connect the HarperDB Studio to HarperDB Instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio. + + + +A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html). \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/index.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/index.md new file mode 100644 index 00000000..93ba1af7 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/index.md @@ -0,0 +1,15 @@ +--- +title: HarperDB Studio +--- + +# HarperDB Studio +HarperDB Studio is the web-based GUI for HarperDB. Studio enables you to administer, navigate, and monitor all of your HarperDB instances in a simple, user-friendly interface without any knowledge of the underlying HarperDB API. It’s free to sign up, so get started today! + +[Sign up for free!](https://studio.harperdb.io/sign-up) + +--- +## How does Studio Work? +While HarperDB Studio is web-based and hosted by us, all database interactions are performed on the HarperDB instance the Studio is connected to. The HarperDB Studio loads in your browser, at which point you log in to your HarperDB instances. Credentials are stored in your browser cache and are not transmitted back to HarperDB. All database interactions are made via the HarperDB Operations API directly from your browser to your instance.
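+ +For illustration, each Studio action translates into a single Operations API request from your browser to the instance, along the general lines of the hedged sketch below; the instance URL and credentials are placeholders, and `describe_all` is a standard operation that lists databases and tables. + +```bash +curl -X POST https://c1-demo.harperdbcloud.com -H 'Content-Type: application/json' -u 'HDB_ADMIN:password' -d '{"operation": "describe_all"}' +```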
+ +## What type of instances can I manage? +HarperDB Studio enables users to manage both HarperDB Cloud instances and privately hosted instances all from a single UI. All HarperDB instances feature identical behavior whether they are hosted by us or by you. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-configuration.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-configuration.md new file mode 100644 index 00000000..55c01be1 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-configuration.md @@ -0,0 +1,119 @@ +--- +title: Instance Configuration +--- + +# Instance Configuration + +HarperDB instance configuration can be viewed and managed directly through the HarperDB Studio. HarperDB Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. User-installed instances can have their licenses modified by adjusting licensed RAM. + + + +All instance configuration is handled through the **config** page of the HarperDB Studio, accessed with the following instructions: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **config** in the instance control bar. + +*Note, the **config** page will only be available to super users and certain items are restricted to Studio organization owners.* + +## Instance Overview + +The **instance overview** panel displays the following instance specifications: + +* Instance URL + +* Instance Node Name (for clustering) + +* Instance API Auth Header (this user) + + *The Basic authentication header used for the logged-in HarperDB database user* + +* Created Date (HarperDB Cloud only) + +* Region (HarperDB Cloud only) + + *The geographic region where the instance is hosted.* + +* Total Price + +* RAM + +* Storage (HarperDB Cloud only) + +* Disk IOPS (HarperDB Cloud only) + +## Update Instance RAM + +HarperDB Cloud instance size and user-installed instance licenses can be modified with the following instructions. This option is only available to Studio organization owners. + + + +Note: For HarperDB Cloud instances, upgrading RAM may add additional CPUs to your instance as well. Click here to see how many CPUs are provisioned for each instance size. + +1) In the **update ram** panel at the bottom left: + + * Select the new instance size. + + * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + + * If you do have a credit card associated, you will be presented with the updated billing information. + + * Click **Upgrade**. + +2) The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.
+ +*Note, if HarperDB Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* + +## Update Instance Storage + +The HarperDB Cloud instance storage size can be increased with the following instructions. This option is only available to Studio organization owners. + +Note: Instance storage can only be upgraded once every 6 hours. + +1) In the **update storage** panel at the bottom left: + + * Select the new instance storage size. + + * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + + * If you do have a credit card associated, you will be presented with the updated billing information. + + * Click **Upgrade**. + +2) The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +*Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* + +## Remove Instance + +The HarperDB instance can be deleted/removed from the Studio with the following instructions. Once this operation is started it cannot be undone. This option is only available to Studio organization owners. + +1) In the **remove instance** panel at the bottom left: + * Enter the instance name in the text box. + + * The Studio will present you with a warning. + + * Click **Remove**. + +2) The instance will begin deleting immediately. + +## Restart Instance + +The HarperDB Cloud instance can be restarted with the following instructions. + +1) In the **restart instance** panel at the bottom right: + * Enter the instance name in the text box. + + * The Studio will present you with a warning. + + * Click **Restart**. + +2) The instance will begin restarting immediately. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-example-code.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-example-code.md new file mode 100644 index 00000000..b4b74e5f --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-example-code.md @@ -0,0 +1,62 @@ +--- +title: Instance Example Code +--- + +# Instance Example Code + +Example code prepopulated with the instance URL and authorization token for the logged-in database user can be found on the **example code** page of the HarperDB Studio. Code samples are generated based on the HarperDB API Documentation Postman collection. Code samples are accessed with the following instructions: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **example code** in the instance control bar. + +5) Select the appropriate **category** from the left navigation. + +6) Select the appropriate **operation** from the left navigation. + +7) Select your desired language/variant from the **Choose Programming Language** dropdown.
+ +8) Copy code from the sample code panel using the copy icon. + +## Supported Languages + +Sample code uses two identifiers: **language** and **variant**. + +* **language** is the programming language that the sample code is generated in. + +* **variant** is the methodology or library used by the language to send HarperDB requests. + +The list of available language/variants is as follows: + +| Language | Variant | +|--------------|---------------| +| C# | RestSharp | +| cURL | cURL | +| Go | Native | +| HTTP | HTTP | +| Java | OkHttp | +| Java | Unirest | +| JavaScript | Fetch | +| JavaScript | jQuery | +| JavaScript | XHR | +| NodeJs | Axios | +| NodeJs | Native | +| NodeJs | Request | +| NodeJs | Unirest | +| Objective-C | NSURLSession | +| OCaml | Cohttp | +| PHP | cURL | +| PHP | HTTP_Request2 | +| PowerShell | RestMethod | +| Python | http.client | +| Python | Requests | +| Ruby | Net::HTTP | +| Shell | Httpie | +| Shell | wget | +| Swift | URLSession | + + diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-metrics.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-metrics.md new file mode 100644 index 00000000..f084df63 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/instance-metrics.md @@ -0,0 +1,16 @@ +--- +title: Instance Metrics +--- + +# Instance Metrics + +The HarperDB Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions: + +1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **status** in the instance control bar. + +Once on the instance status page you can view host system information, [HarperDB logs](../logging/standard-logging), and [HarperDB Cloud alarms](../../deployments/harperdb-cloud/alarms) (if it is a cloud instance). + +_Note, the **status** page will only be available to super users._ diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/instances.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/instances.md new file mode 100644 index 00000000..7c209629 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/instances.md @@ -0,0 +1,131 @@ +--- +title: Instances +--- + +# Instances + +The HarperDB Studio allows you to administer all of your HarperDB instances in one place. HarperDB currently offers the following instance types: + +* **HarperDB Cloud Instance** Managed installations of HarperDB, what we call [HarperDB Cloud](../../deployments/harperdb-cloud/). +* **5G Wavelength Instance** Managed installations of HarperDB running on the Verizon network through AWS Wavelength, what we call [5G Wavelength Instances](../../deployments/harperdb-cloud/verizon-5g-wavelength-instances). _Note, these instances are only accessible via the Verizon network._ +* **User-Installed Instance** Any HarperDB installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally. + +All interactions between the Studio and your instances take place directly from your browser. HarperDB stores metadata about your instances, which enables the Studio to display these instances when you log in.
Beyond that, all traffic is routed from your browser to the HarperDB instances using the standard [HarperDB API](../../developers/operations-api/). + +## Organization Instance List + +A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets its own card. HarperDB Cloud and user-installed instances are listed together. + +## Create a New Instance + +1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New HarperDB Cloud Instance + Register User-Installed Instance** card. +1. Select your desired Instance Type. +1. For a HarperDB Cloud Instance or a HarperDB 5G Wavelength Instance, click **Create HarperDB Cloud Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ + 1. Enter Instance Username + + _This is the username of the initial HarperDB instance super user._ + 1. Enter Instance Password + + _This is the password of the initial HarperDB instance super user._ + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _HarperDB Cloud Instances are billed based on Instance RAM; this will select the size of your provisioned instance._ [_More on instance specs_](../../deployments/harperdb-cloud/instance-size-hardware-specs)_._ + 1. Select Storage Size + + _Each instance has a mounted storage volume where your HarperDB data will reside. Storage is provisioned based on space and IOPS._ [_More on IOPS Impact on Performance_](../../deployments/harperdb-cloud/iops-impact)_._ + 1. Select Instance Region + + _The geographic area where your instance will be provisioned._ + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. Your HarperDB Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. + + +## Register User-Installed Instance + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +2) Click the appropriate organization for the instance to be created under. +3) Click the **Create New HarperDB Cloud Instance + Register User-Installed Instance** card. +4) Select **Register User-Installed Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This is used for descriptive purposes only._ + 1. Enter Instance Username + + _The username of a HarperDB super user that is already configured in your HarperDB installation._ + 1. Enter Instance Password + + _The password of a HarperDB super user that is already configured in your HarperDB installation._ + 1. Enter Host + + _The host to access the HarperDB instance. For example, `harperdb.myhost.com` or `localhost`._ + 1.
Enter Port + + _The port to access the HarperDB instance. HarperDB defaults to `9925` for HTTP and `31283` for HTTPS._ + 1. Select SSL + + _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._ + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _HarperDB instances are billed based on Instance RAM. Selecting additional RAM enables faster and more complex queries._ + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. The HarperDB Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete. + +## Delete an Instance + +Instance deletion has two different behaviors depending on the instance type. + +* **HarperDB Cloud Instance** This instance will be permanently deleted, including all data. This process is irreversible and cannot be undone. +* **User-Installed Instance** The instance will be removed from the HarperDB Studio only. This does not uninstall HarperDB from your system and your data will remain intact. + +An instance can be deleted as follows: + +1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the trash can icon. +1. Enter the instance name into the text box. + + _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ +1. Click the **Do It** button. + +## Upgrade an Instance + +HarperDB instances can be resized on the [Instance Configuration](./instance-configuration) page. + +## Instance Log In/Log Out + +The Studio enables users to log in and out of different database users from the instance control panel. To log out of an instance: + +1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the lock icon. +1. You will immediately be logged out of the instance. + +To log in to an instance: + +1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card, it will have an unlocked icon and a status reading PLEASE LOG IN, and click the center of the card. +1. Enter the database username. + + _The username of a HarperDB user that is already configured in your HarperDB instance._ +1. Enter the database password. + + _The password of a HarperDB user that is already configured in your HarperDB instance._ +1. Click **Log In**.
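+ +If the Studio reports a connection error while registering a user-installed instance, one way to sanity-check reachability outside the browser is a direct Operations API request. This is a hedged sketch; the host, port, and credentials are placeholders matching the examples above. + +```bash +# Expect a JSON description of databases and tables if the instance is reachable +curl -X POST https://harperdb.myhost.com:9925 -H 'Content-Type: application/json' -u 'HDB_ADMIN:password' -d '{"operation": "describe_all"}' +```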
diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/login-password-reset.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/login-password-reset.md new file mode 100644 index 00000000..dddda5c1 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/login-password-reset.md @@ -0,0 +1,42 @@ +--- +title: Login and Password Reset +--- + +# Login and Password Reset + +## Log In to Your HarperDB Studio Account + +To log into your existing HarperDB Studio account: + +1) Navigate to the [HarperDB Studio](https://studio.harperdb.io/). +2) Enter your email address. +3) Enter your password. +4) Click **sign in**. + +## Reset a Forgotten Password + +To reset a forgotten password: + +1) Navigate to the HarperDB Studio password reset page. +2) Enter your email address. +3) Click **send password reset email**. +4) If the account exists, you will receive an email with a temporary password. +5) Navigate back to the HarperDB Studio login page. +6) Enter your email address. +7) Enter your temporary password. +8) Click **sign in**. +9) You will be taken to a new screen to reset your account password. Enter your new password. +*Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* +10) Click the **add account password** button. + +## Change Your Password + +If you are already logged into the Studio, you can change your password through the user interface. + +1) Navigate to the HarperDB Studio profile page. +2) In the **password** section, enter: + + * Current password. + * New password. + * New password again *(for verification)*. +3) Click the **Update Password** button. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-charts.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-charts.md new file mode 100644 index 00000000..38c8bc0d --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-charts.md @@ -0,0 +1,65 @@ +--- +title: Manage Charts +--- + +# Manage Charts + +The HarperDB Studio includes a charting feature within an instance. Charts are generated in real time based on your existing data and automatically refreshed every 15 seconds. Instance charts can be accessed with the following instructions: + +1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **charts** in the instance control bar. + +## Creating a New Chart + +Charts are generated based on SQL queries; therefore, to build a new chart you first need to build a query. Instructions as follows (starting on the charts page described above): + +1. Click **query** in the instance control bar. +1. Enter the SQL query you would like to generate a chart from. + + _For example, using the dog demo data from the API Docs, we can get the average dog age per owner with the following query: `SELECT AVG(age) as avg_age, owner_name FROM dev.dog GROUP BY owner_name`._ +1. Click **Execute**. +1. Click **create chart** at the top right of the results table. +1. Configure your chart. + 1. Choose chart type. + + _HarperDB Studio offers many standard charting options like line, bar, etc._ + 1. Choose a data column. + + _This column will be used to plot the data point. Typically, these are the values being calculated in the `SELECT` statement.
Depending on the chart type, you can select multiple data columns to display on a single chart._ + 1. Depending on the chart type, you will need to select a grouping. + + _This could be labeled as x-axis, label, etc. This will be used to group the data; typically this is what you used in your **GROUP BY** clause._ + 1. Enter a chart name. + + _Used for identification purposes and will be displayed at the top of the chart._ + 1. Choose the visible to all org users toggle. + + _Leaving this option off will limit chart visibility to just your HarperDB Studio user. Toggling it on will enable all users within this Organization to view this chart._ + 1. Click **Add Chart**. + 1. The chart will now be visible on the **charts** page. + +The example query above, configured as a bar chart, results in the following chart: + +![Average Age per Owner Example](/img/v4.2/ave-age-per-owner-ex.png) + +## Downloading Charts + +HarperDB Studio charts can be downloaded in SVG, PNG, and CSV format. Instructions as follows (starting on the charts page described above): + +1. Identify the chart you would like to export. +1. Click the three bars icon. +1. Select the appropriate download option. +1. The Studio will generate the export and begin downloading immediately. + +## Delete a Chart + +Delete a chart as follows (starting on the charts page described above): + +1. Identify the chart you would like to delete. +1. Click the X icon. +1. Click the **confirm delete chart** button. +1. The chart will be deleted. + +Deleting a chart that is visible to all Organization users will delete it for all users. diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-clustering.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-clustering.md new file mode 100644 index 00000000..7155249d --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-clustering.md @@ -0,0 +1,94 @@ +--- +title: Manage Clustering +--- + +# Manage Clustering + +HarperDB instance clustering and replication can be configured directly through the HarperDB Studio. It is recommended to read through the clustering documentation first to gain a strong understanding of HarperDB clustering behavior. + + + +All clustering configuration is handled through the **cluster** page of the HarperDB Studio, accessed with the following instructions: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **cluster** in the instance control bar. + +Note, the **cluster** page will only be available to super users. + +--- +## Initial Configuration + +HarperDB instances do not have clustering configured by default. The HarperDB Studio will walk you through the initial configuration. Upon entering the **cluster** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left while actions are described in the middle of the screen. + +1) Create a cluster user; read more about this here: Clustering Users and Roles. + * Enter username. + + * Enter password. + + * Click **Create Cluster User**. + +2) Click **Set Cluster Node Name**. +3) Click **Enable Instance Clustering**. + +At this point the Studio will restart your HarperDB instance, which is required for the configuration changes to take effect.
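+ +If you prefer to script the cluster user creation rather than click through the Studio, a hedged sketch against the Operations API looks like the following; the `cluster_user` role name, port, and credentials are assumptions to verify against your version's documentation. + +```bash +curl -X POST https://localhost:9925 -H 'Content-Type: application/json' -u 'HDB_ADMIN:password' -d '{"operation": "add_user", "role": "cluster_user", "username": "cluster_account", "password": "cluster_password", "active": true}' +```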
+ +--- + +## Manage Clustering +Once initial clustering configuration is completed, you are presented with a clustering management screen with the following properties: + +* **connected instances** + + Displays all instances within the Studio Organization that this instance manages a connection with. + +* **unconnected instances** + + Displays all instances within the Studio Organization that this instance does not manage a connection with. + +* **unregistered instances** + + Displays all instances outside of the Studio Organization that this instance manages a connection with. + +* **manage clustering** + + Once instances are connected, this will display clustering management options for all connected instances and all schemas and tables. +--- + +## Connect an Instance + +HarperDB Instances can be clustered together with the following instructions. + +1) Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both. + +2) Identify the instance you would like to connect from the **unconnected instances** panel. + +3) Click the plus icon next to the appropriate instance. + +4) If configurations are correct, all schemas will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue. + +--- + +## Disconnect an Instance + +HarperDB Instances can be disconnected with the following instructions. + +1) Identify the instance you would like to disconnect from the **connected instances** panel. + +2) Click the minus icon next to the appropriate instance. + +--- + +## Manage Replication + +Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions: + +1) Identify the instance, schema, and table for replication to be configured. + +2) For publish, click the toggle switch in the **publish** column. + +3) For subscribe, click the toggle switch in the **subscribe** column. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-functions.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-functions.md new file mode 100644 index 00000000..3a74d7e5 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-functions.md @@ -0,0 +1,163 @@ +--- +title: Manage Functions +--- + +# Manage Functions + +HarperDB Custom Functions are enabled by default and can be configured further through the HarperDB Studio. It is recommended to read through the Custom Functions documentation first to gain a strong understanding of HarperDB Custom Functions behavior. + + + +All Custom Functions configuration is handled through the **functions** page of the HarperDB Studio, accessed with the following instructions: + +1) Navigate to the HarperDB Studio Organizations page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **functions** in the instance control bar.
+ +*Note, the **functions** page will only be available to super users.* + +## Manage Projects + +On the **functions** page of the HarperDB Studio you are presented with a functions management screen with the following properties: + +* **projects** + + Displays a list of Custom Functions projects residing on this instance. +* **/project_name/routes** + + Only displayed if there is an existing project. Displays the routes files contained within the selected project. +* **/project_name/helpers** + + Only displayed if there is an existing project. Displays the helper files contained within the selected project. +* **/project_name/static** + + Only displayed if there is an existing project. Displays the static file count and a link to the static files contained within the selected project. Note, static files cannot currently be deployed through the Studio and must be deployed via the [HarperDB API](https://api.harperdb.io/) or manually to the server (not applicable with HarperDB Cloud). +* **Root File Directory** + + Displays the root file directory where the Custom Functions projects reside on this instance. +* **Custom Functions Server URL** + + Displays the base URL at which all Custom Functions are accessed for this instance. + + +## Create a Project + +HarperDB Custom Functions Projects can be initialized with the following instructions. + +1) Click the plus icon next to the **projects** heading. (If this is your first project, skip this step.) + +2) Enter the project name in the text box located under the **projects** heading. + +3) Click the check mark icon to confirm the project name. + +4) The Studio will take a few moments to provision a new project based on the [Custom Functions template](https://github.com/HarperDB/harperdb-custom-functions-template). + +5) The Custom Functions project is now created and ready to modify. + +## Modify a Project + +Custom Functions routes and helper functions can be modified directly through the Studio. From the **functions** page: + +1) Select the appropriate **project**. + +2) Select the appropriate **route** or **helper**. + +3) Modify the code with your desired changes. + +4) Click the save icon at the bottom right of the screen. + + *Note, saving modifications will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.* + +## Create Additional Routes/Helpers + +To create an additional **route** in your Custom Functions project, from the **functions** page: + +1) Select the appropriate Custom Functions **project**. + +2) Click the plus icon to the right of the **routes** header. + +3) Enter the name of the new route in the textbox that appears. + +4) Click the check icon to create the new route. + + *Note, adding a route will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.* + +To create an additional **helper** in your Custom Functions project, from the **functions** page: + +1) Select the appropriate Custom Functions **project**. + +2) Click the plus icon to the right of the **helpers** header. + +3) Enter the name of the new helper in the textbox that appears. + +4) Click the check icon to create the new helper.
+ + *Note, adding a helper will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.* + +## Delete a Project/Route/Helper + +To delete a Custom Functions project from the **functions** page: + +1) Click the minus icon to the right of the **projects** header. + +2) Click the red minus icon to the right of the Custom Functions project you would like to delete. + +3) Confirm deletion by clicking the red check icon. + + *Note, deleting a project will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.* + +To delete a Custom Functions _project route_ from the **functions** page: + +1) Select the appropriate Custom Functions **project**. + +2) Click the minus icon to the right of the **routes** header. + +3) Click the red minus icon to the right of the Custom Functions route you would like to delete. + +4) Confirm deletion by clicking the red check icon. + + *Note, deleting a route will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.* + +To delete a Custom Functions _project helper_ from the **functions** page: + +1) Select the appropriate Custom Functions **project**. + +2) Click the minus icon to the right of the **helpers** header. + +3) Click the red minus icon to the right of the Custom Functions helper you would like to delete. + +4) Confirm deletion by clicking the red check icon. + + *Note, deleting a helper will restart the Custom Functions server on your HarperDB instance and may result in up to 60 seconds of downtime for all Custom Functions.* + +## Deploy Custom Functions Project to Other Instances + +The HarperDB Studio provides the ability to deploy Custom Functions projects to additional HarperDB instances within the same Studio Organization. To deploy Custom Functions projects to additional instances, starting from the **functions** page: + +1) Select the **project** you would like to deploy. + +2) Click the **deploy** button at the top right. + +3) A list of instances (excluding the current instance) within the organization will be displayed in tabular format with the following information: + + * **Instance Name**: The name used to describe the instance. + + * **Instance URL**: The URL used to access the instance. + + * **CF Capable**: Describes if the instance version supports Custom Functions (yes/no). + + * **CF Enabled**: Describes if Custom Functions are configured and enabled on the instance (yes/no). + + * **Has Project**: Describes if the selected Custom Functions project has been previously deployed to the instance (yes/no). + + * **Deploy**: Button used to deploy the project to the instance. + + * **Remove**: Button used to remove the project from the instance. *Note, this will only be visible if the project has been previously deployed to the instance.* + +4) In the appropriate instance row, click the **deploy** button.
+ + *Note, deploying a project will restart the Custom Functions server on the HarperDB instance receiving the deployment and may result in up to 60 seconds of downtime for all Custom Functions.* diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-instance-roles.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-instance-roles.md new file mode 100644 index 00000000..e301e7d8 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-instance-roles.md @@ -0,0 +1,76 @@ +--- +title: Manage Instance Roles +--- + +# Manage Instance Roles + +HarperDB roles can be managed directly through the HarperDB Studio. It is recommended to read through the users & roles documentation to gain a strong understanding of how they operate. + + + +Instance role configuration is handled through the roles page of the HarperDB Studio, accessed with the following instructions: + +1) Navigate to the HarperDB Studio Organizations page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **roles** in the instance control bar. + +*Note, the **roles** page will only be available to super users.* + + + +The *roles management* screen consists of the following panels: + +* **super users** + + Displays all super user roles for this instance. +* **cluster users** + + Displays all cluster user roles for this instance. +* **standard roles** + + Displays all standard roles for this instance. +* **role permission editing** + + Once a role is selected for editing, permissions will be displayed here in JSON format. + +*Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`.* + +## Role Management + +#### Create a Role + +1) Click the plus icon at the top right of the appropriate role section. + +2) Enter the role name. + +3) Click the green check mark. + +4) Configure the role permissions in the role permission editing panel. + + *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + +5) Click **Update Role Permissions**. + +#### Modify a Role + +1) Click the appropriate role from the appropriate role section. + +2) Modify the role permissions in the role permission editing panel. + + *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + +3) Click **Update Role Permissions**. + +#### Delete a Role + +Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it. + +1) Click the minus icon at the top right of the appropriate role section. + +2) Identify the appropriate role to delete and click the red minus sign in the same row. + +3) Click the red check mark to confirm deletion. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-instance-users.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-instance-users.md new file mode 100644 index 00000000..4871cf88 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-instance-users.md @@ -0,0 +1,63 @@ +--- +title: Manage Instance Users +--- + +# Manage Instance Users + +HarperDB instance users can be managed directly through the HarperDB Studio.
It is recommended to read through the users & roles documentation first to gain a strong understanding of how users and roles operate. + + + +Instance user configuration is handled through the **users** page of the HarperDB Studio, accessed with the following instructions: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **users** in the instance control bar. + +*Note, the **users** page will only be available to super users.* + +## Add a User + +HarperDB instance users can be added with the following instructions. + +1) In the **add user** panel on the left, enter: + + * New user username. + + * New user password. + + * Select a role. + + *Learn more about role management here: [Manage Instance Roles](./manage-instance-roles).* + +2) Click **Add User**. + +## Edit a User + +HarperDB instance users can be modified with the following instructions. + +1) In the **existing users** panel, click the row of the user you would like to edit. + +2) To change a user’s password: + + 1) In the **Change user password** section, enter the new password. + + 2) Click **Update Password**. + +3) To change a user’s role: + + 1) In the **Change user role** section, select the new role. + + 2) Click **Update Role**. + +4) To delete a user: + + 1) In the **Delete User** section, type the username into the textbox. + + *This is done for confirmation purposes.* + + 2) Click **Delete User**. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-schemas-browse-data.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-schemas-browse-data.md new file mode 100644 index 00000000..41493b96 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/manage-schemas-browse-data.md @@ -0,0 +1,132 @@ +--- +title: Manage Schemas / Browse Data +--- + +# Manage Schemas / Browse Data + +Manage instance schemas/tables and browse data in tabular format with the following instructions: + +1) Navigate to the HarperDB Studio Organizations page. +2) Click the appropriate organization that the instance belongs to. +3) Select your desired instance. +4) Click **browse** in the instance control bar. + +Once on the instance browse page you can view data, manage schemas and tables, add new data, and more. + +## Manage Schemas and Tables + +#### Create a Schema + +1) Click the plus icon at the top right of the schemas section. +2) Enter the schema name. +3) Click the green check mark. + + +#### Delete a Schema + +Deleting a schema is permanent and irreversible. Deleting a schema removes all tables and data within it. + +1) Click the minus icon at the top right of the schemas section. +2) Identify the appropriate schema to delete and click the red minus sign in the same row. +3) Click the red check mark to confirm deletion. + + +#### Create a Table + +1) Select the desired schema from the schemas section. +2) Click the plus icon at the top right of the tables section. +3) Enter the table name. +4) Enter the primary key. + + *The primary key is also often referred to as the hash attribute in the Studio, and it defines the unique identifier for each row in your table.* +5) Click the green check mark. + + +#### Delete a Table +Deleting a table is permanent and irreversible. Deleting a table removes all data within it. + +1) Select the desired schema from the schemas section.
+2) Click the minus icon at the top right of the tables section. +3) Identify the appropriate table to delete and click the red minus sign in the same row. +4) Click the red check mark to confirm deletion. + +## Manage Table Data + +The following section assumes you have selected the appropriate table from the schema/table browser. + + + +#### Filter Table Data + +1) Click the magnifying glass icon at the top right of the table browser. This expands the search filters. +2) Enter your desired filter criteria. +3) The results will be filtered appropriately. + + +#### Load CSV Data + +1) Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. +2) To import a CSV by URL: + 1) Enter the URL in the **CSV file URL** textbox. + 2) Click **Import From URL**. + 3) The CSV will load, and you will be redirected back to browse table data. +3) To upload a CSV file: + 1) Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). + 2) Navigate to your desired CSV file and select it. + 3) Click **Insert X Records**, where X is the number of records in your CSV. + 4) The CSV will load, and you will be redirected back to browse table data. + + +#### Add a Record + +1) Click the plus icon at the top right of the table browser. +2) The Studio will pre-populate existing table attributes in JSON format. + + *The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction (see the API sketch at the end of this page).* +3) Enter values to be added to the record. + + *You may add new attributes to the JSON; they will be automatically added to the table.* +4) Click the **Add New** button. + + +#### Edit a Record + +1) Click the record/row you would like to edit. +2) Modify the desired values. + + *You may add new attributes to the JSON; they will be automatically added to the table.* + +3) Click the **save icon**. + + +#### Delete a Record + +Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted. + +1) Click the record/row you would like to delete. +2) Click the **delete icon**. +3) Confirm deletion by clicking the **check icon**. + +## Browse Table Data + +The following section assumes you have selected the appropriate table from the schema/table browser. + +#### Browse Table Data + +The first page of table data is automatically loaded on table selection. Paging controls are at the bottom of the table. Here you can: + +* Page left and right using the arrows. +* Type in the desired page. +* Change the page size (the number of records displayed in the table). + + +#### Refresh Table Data + +Click the refresh icon at the top right of the table browser. + + + +#### Automatically Refresh Table Data + +Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data.
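+ +The **Add a Record** workflow above also has an Operations API counterpart; as a sketch, an `insert` operation adds multiple records in a single transaction (the schema, table, and field names below are illustrative, borrowed from examples elsewhere in these docs): + +```json +{ + "operation": "insert", + "schema": "dev", + "table": "dog", + "records": [ + { "id": 1, "dog_name": "Penny", "age": 7 }, + { "id": 2, "dog_name": "Harper", "age": 5 } + ] +} +```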
+ diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/organizations.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/organizations.md new file mode 100644 index 00000000..f9d5cb50 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/organizations.md @@ -0,0 +1,105 @@ +--- +title: Organizations +--- + +# Organizations +HarperDB Studio organizations provide the ability to group HarperDB Cloud Instances. Organization behavior is as follows: + +* Billing occurs at the organization level to a single credit card. +* Organizations retain their own unique HarperDB Cloud subdomain. +* Cloud instances reside within an organization. +* Studio users can be invited to organizations to share instances. + + +An organization is automatically created for you when you sign up for HarperDB Studio. If you only have one organization, the Studio will automatically bring you to your organization’s page. + +--- + +## List Organizations +A summary of all organizations your user belongs to can be viewed on the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the HarperDB Studio. + +## Create a New Organization +A new organization can be created as follows: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the **Create a New Organization** card. +3) Fill out the new organization details: + * Enter Organization Name + *This is used for descriptive purposes only.* + * Enter Organization Subdomain + *Part of the URL that will be used to identify your HarperDB Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* +4) Click **Create Organization**. + +## Delete an Organization +An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows: + +1) Navigate to the HarperDB Studio Organizations page. +2) Identify the proper organization card and click the trash can icon. +3) Enter the organization name into the text box. + + *This is done for confirmation purposes to ensure you do not accidentally delete an organization.* +4) Click the **Do It** button. + +## Manage Users +HarperDB Studio organization owners can manage users, including inviting new users, removing users, and toggling ownership. + + + +#### Inviting a User +A new user can be invited to an organization as follows: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) In the **add user** box, enter the new user’s email address. +5) Click **Add User**. + +Users may or may not already be HarperDB Studio users when adding them to an organization. If the HarperDB Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a HarperDB Studio account, they will receive an email welcoming them to HarperDB Studio. + +--- + +#### Toggle a User’s Organization Owner Status +Organization owners have full access to the organization including the ability to manage organization users, create, modify, and delete instances, and delete the organization. Users must have accepted their invitation prior to being promoted to an owner.
A user’s organization owner status can be toggled as follows: + +1) Navigate to the HarperDB Studio Organizations page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) Click the appropriate user from the **existing users** section. +5) Toggle the **Is Owner** switch to the desired status. +--- + +#### Remove a User from an Organization +Users may be removed from an organization at any time. Removing a user from an organization will not delete their HarperDB Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) Click the appropriate user from the **existing users** section. +5) Type **DELETE** in the text box in the **Delete User** row. + + *This is done for confirmation purposes to ensure you do not accidentally delete a user.* +6) Click **Delete User**. + +## Manage Billing + +Billing is configured per organization and is billed to the stored credit card at appropriate intervals (monthly or annually depending on the registered instance). Billing settings can be configured as follows: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **billing** at the top of the screen. + +Here organization owners can view invoices, manage coupons, and manage the associated credit card. + + + +*HarperDB billing and payments are managed via Stripe.* + + + +### Add a Coupon + +Coupons are applicable towards any paid tier or user-installed instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows: + +1) In the coupons panel of the **billing** page, enter your coupon code. +2) Click **Add Coupon**. +3) The coupon will then be available and displayed in the coupons panel. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/harperdb-studio/query-instance-data.md b/site/versioned_docs/version-4.2/administration/harperdb-studio/query-instance-data.md new file mode 100644 index 00000000..5c3ae28f --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/harperdb-studio/query-instance-data.md @@ -0,0 +1,53 @@ +--- +title: Query Instance Data +--- + +# Query Instance Data + +SQL queries can be executed directly through the HarperDB Studio with the following instructions: + +1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. +2) Click the appropriate organization that the instance belongs to. +3) Select your desired instance. +4) Click **query** in the instance control bar. +5) Enter your SQL query in the SQL query window. +6) Click **Execute**. + +*Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser (a bounded alternative is sketched below).* + +## Browse Query Results Set + +#### Browse Results Set Data + +The first page of results set data is automatically loaded on query execution. Paging controls are at the bottom of the table. Here you can: + +* Page left and right using the arrows. +* Type in the desired page. +* Change the page size (the number of records displayed in the table).
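+ +As a concrete illustration of the `SELECT *` caution above, a bounded query of the following shape keeps the results set (and your browser) manageable; the table and columns here are illustrative, borrowed from examples elsewhere in these docs: + +``` +SELECT id, dog_name, age +FROM dev.dog +ORDER BY age DESC +LIMIT 100 +```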
+ +#### Refresh Results Set + +Click the refresh icon at the top right of the results set table. + +#### Automatically Refresh Results Set + +Toggle the auto switch at the top right of the results set table. The results set will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. + +## Query History + +Query history is stored in your local browser cache. Executed queries are listed with the most recent at the top in the **query history** section. + + +#### Rerun Previous Query + +* Identify the query from the **query history** list. +* Click the appropriate query. It will be loaded into the **sql query** input box. +* Click **Execute**. + +#### Clear Query History + +Click the trash can icon at the top right of the **query history** section. + +## Create Charts + +The HarperDB Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information. \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/administration/jobs.md b/site/versioned_docs/version-4.2/administration/jobs.md new file mode 100644 index 00000000..e7eccad2 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/jobs.md @@ -0,0 +1,112 @@ +--- +title: Jobs +--- + +# Jobs + +HarperDB Jobs are asynchronous tasks performed by the Operations API. + +## Job Summary + +Jobs use an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so the job is started and its id is provided to check on the status. + +The job status can be **COMPLETE** or **IN\_PROGRESS**. + +## Example Job Operations + +Example job operations include: + +[csv data load](https://api.harperdb.io/#0186bc25-b9ae-44e7-bd9e-8edc0f289aa2) + +[csv file load](https://api.harperdb.io/#c4b71011-8a1d-4cb2-8678-31c0363fea5e) + +[csv url load](https://api.harperdb.io/#d1e9f433-e250-49db-b44d-9ce2dcd92d32) + +[import from s3](https://api.harperdb.io/#820b3947-acbe-41f9-858b-2413cabc3a18) + +[delete\_records\_before](https://api.harperdb.io/#8de87e47-73a8-4298-b858-ca75dc5765c2) + +[export\_local](https://api.harperdb.io/#49a02517-ada9-4198-b48d-8707db905be0) + +[export\_to\_s3](https://api.harperdb.io/#f6393e9f-e272-4180-a42c-ff029d93ddd4) + +Example Response from a Job Operation + +``` +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` + +Whenever one of these operations is initiated, an asynchronous job is created and the response contains the ID of that job, which can be used to check on its status. + +## Managing Jobs + +To check on a job's status, use the [get\_job](https://api.harperdb.io/#d501bef7-dbb7-4714-b535-e466f6583dce) operation.
+ +Get Job Request + +``` +{ + "operation": "get_job", + "id": "4a982782-929a-4507-8794-26dae1132def" +} +``` + +Get Job Response + +``` +[ + { + "__createdtime__": 1611615798782, + "__updatedtime__": 1611615801207, + "created_datetime": 1611615798774, + "end_datetime": 1611615801206, + "id": "4a982782-929a-4507-8794-26dae1132def", + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "start_datetime": 1611615798805, + "status": "COMPLETE", + "type": "csv_url_load", + "user": "HDB_ADMIN", + "start_datetime_converted": "2021-01-25T23:03:18.805Z", + "end_datetime_converted": "2021-01-25T23:03:21.206Z" + } +] +``` + +## Finding Jobs + +To find jobs (if the ID is not known), use the [search\_jobs\_by\_start\_date](https://api.harperdb.io/#4474ca16-e4c2-4740-81b5-14ed98c5eeab) operation. + +Search Jobs Request + +``` +{ + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" +} +``` + +Search Jobs Response + +``` +[ + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } +] +``` diff --git a/site/versioned_docs/version-4.2/administration/logging/audit-logging.md b/site/versioned_docs/version-4.2/administration/logging/audit-logging.md new file mode 100644 index 00000000..5871586b --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/logging/audit-logging.md @@ -0,0 +1,135 @@ +--- +title: Audit Logging +--- + +# Audit Logging + +### Audit log + +The audit log uses a standard HarperDB table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. + +Audit log is enabled by default. To disable the audit log, set `logging.auditLog` to false in the config file, `harperdb-config.yaml`. Then restart HarperDB for those changes to take effect. Note, the audit log must be enabled for real-time messaging. + +### Audit Log Operations + +#### read\_audit\_log + +The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [HarperDB API documentation](../../developers/operations-api/logs). + +**Search by Timestamp** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558 + ] +} +``` + +There are three possible outcomes when searching by timestamp. + +* `"search_values": []` - All records returned for specified table +* `"search_values": [1660585740558]` - All records after provided timestamp +* `"search_values": [1660585740558, 1760585759710]` - Records "from" and "to" provided timestamp + +*** + +**Search by Username** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +The above example will return all records whose `username` is "admin."
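+ +Returning to the timestamp search above, the two-value form of `search_values` combines into a from/to range query; a sketch reusing the timestamps from the outcomes listed there: + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558, + 1760585759710 + ] +} +```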
+ +*** + +**Search by Primary Key** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +The above example will return all records whose primary key (`hash_value`) is 318. + +*** + +#### read\_audit\_log Response + +The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation gives you the `original_records`. + +```json +{ + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [ + 1, + 2 + ], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] +} +``` + +#### delete\_audit\_logs\_before + +Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 +} +``` diff --git a/site/versioned_docs/version-4.2/administration/logging/index.md b/site/versioned_docs/version-4.2/administration/logging/index.md new file mode 100644 index 00000000..2ed92774 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/logging/index.md @@ -0,0 +1,11 @@ +--- +title: Logging +--- + +# Logging + +HarperDB provides many different logging options for various features and functionality. + +* [Standard Logging](./standard-logging): HarperDB maintains a log of events that take place throughout operation. +* [Audit Logging](./audit-logging): HarperDB uses a standard HarperDB table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +* [Transaction Logging](./transaction-logging): HarperDB stores a verbose history of all transactions logged for specified database tables, including original data records. diff --git a/site/versioned_docs/version-4.2/administration/logging/standard-logging.md b/site/versioned_docs/version-4.2/administration/logging/standard-logging.md new file mode 100644 index 00000000..d586da1c --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/logging/standard-logging.md @@ -0,0 +1,65 @@ +--- +title: Standard Logging +--- + +# Standard Logging + +HarperDB maintains a log of events that take place throughout operation. Log messages can be used for diagnostic purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory `/log/hdb.log`. The install log is located in the HarperDB application directory, most likely in your npm directory: `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: + +``` +<timestamp> [<level>] [<thread/id>] ...[<tags>]: <message> +``` + +For example, a typical log entry looks like: + +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started.
+``` + +The components of a log entry are: + +* timestamp - This is the date/time stamp when the event occurred +* level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels in order of least urgent (and more verbose) are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +* thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are: + * main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads + * http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + * Clustering\* - These are threads and processes that handle replication. + * job - These are job threads that have been started to handle operations that are executed in a separate job thread. +* tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +* message - This is the main message that was reported. + +We try to keep logging to a minimum by default, to do this the default log level is `error`. If you require more information from the logs, increasing the log level down will provide that. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +HarperDB clustering utilizes two [Nats](https:/nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of HarperDB and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels from least verbose are `error`, `warn`, `info`, `debug` and `trace`. + +## Log File vs Standard Streams + +HarperDB logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. To log to standard streams effectively, make sure to directly run `harperdb` and don't start it as a separate process (don't use `harperdb start`) and `logging.stdStreams` must be set to true. Note, logging to standard streams only will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This will allow for organized storage and efficient use of disk space. For more information see “logging” in our [config docs](../../deployments/configuration). + +## Read Logs via the API + +To access specific logs you may query the HarperDB API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria. 
+ +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` diff --git a/site/versioned_docs/version-4.2/administration/logging/transaction-logging.md b/site/versioned_docs/version-4.2/administration/logging/transaction-logging.md new file mode 100644 index 00000000..a65c4714 --- /dev/null +++ b/site/versioned_docs/version-4.2/administration/logging/transaction-logging.md @@ -0,0 +1,87 @@ +--- +title: Transaction Logging +--- + +# Transaction Logging + +HarperDB offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. + +## Transaction log + +The first option is `read_transaction_log`. The transaction log is built upon clustering streams. Clustering streams are per-table message stores that enable data to be propagated across a cluster. HarperDB leverages streams for use with the transaction log. When clustering is enabled, all transactions that occur against a table are pushed to its stream, and thus make up the transaction log. + +If you would like to use the transaction log, but have not set up clustering yet, please see ["How to Cluster"](../../developers/clustering/). + +## Transaction Log Operations + +### read\_transaction\_log + +The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided. (The `from` and `to` values are Unix epoch timestamps in milliseconds.) + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 +} +``` + +_See example response below._ + +### read\_transaction\_log Response + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +_See example request above._ + +### delete\_transaction\_logs\_before + +The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +_Note: Streams are used for catchup if a node goes down. If you delete messages from a stream there is a chance catchup won't work._ + +Read on for `read_audit_log`, the second option for logging transactions executed against a table.
diff --git a/site/versioned_docs/version-4.2/deployments/_category_.json b/site/versioned_docs/version-4.2/deployments/_category_.json new file mode 100644 index 00000000..8fdd6e17 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Deployments", + "position": 3, + "link": { + "type": "generated-index", + "title": "Deployments Documentation", + "description": "Installation and deployment guides for HarperDB", + "keywords": [ + "deployments" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/deployments/configuration.md b/site/versioned_docs/version-4.2/deployments/configuration.md new file mode 100644 index 00000000..c10ba2a8 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/configuration.md @@ -0,0 +1,746 @@ +--- +title: Configuration File +--- + +# Configuration File + +HarperDB is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the operations API root directory (by default this is a directory named `hdb` located in the home directory of the current user). + +All available configuration will be populated by default in the config file on install, regardless of whether it is used. + +*** + +## Using the Configuration File and Naming Conventions + +The configuration elements in `harperdb-config.yaml` use camelCase: `operationsApi`. + +To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. HarperDB must be restarted for changes to take effect. + +Alternately, configuration can be changed via environment and/or command line variables or via the API. To access lower level elements, use underscores to append parent/child elements (when used this way elements are case insensitive): + +``` +- Environment variables: `OPERATIONSAPI_NETWORK_PORT=9925` +- Command line variables: `--OPERATIONSAPI_NETWORK_PORT 9925` +- Calling `set_configuration` through the API: `operationsApi_network_port: 9925` +``` + +_Note: Component configuration cannot be added or updated via CLI or ENV variables._
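+ +For example, the port change shown above can be applied at runtime through the Operations API with `set_configuration`; a minimal sketch following the underscore naming convention above (a restart is still required for the change to take effect): + +```json +{ + "operation": "set_configuration", + "operationsApi_network_port": 9925 +} +```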
+ +## Importing installation configuration + +To use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your custom configuration file. + +To install HarperDB on top of an existing configuration file, set `HDB_CONFIG` to the root path of your install `/harperdb-config.yaml` + +*** + +## Configuration Options + +### `http` + +`sessionAffinity` - _Type_: string; _Default_: null + +HarperDB is a multi-threaded server designed to scale to utilize many CPU cores with high concurrency. Session affinity can help improve the efficiency and fairness of thread utilization by routing multiple requests from the same client to the same thread. This provides a fairer method of request handling by keeping a single user contained to a single thread, can improve caching locality (multiple requests from a single user are more likely to access the same data), and can provide the ability to share information in-memory in user sessions. Enabling session affinity will cause subsequent requests from the same client to be routed to the same thread. + +To enable `sessionAffinity`, you need to specify how clients will be identified from the incoming requests. If you are using HarperDB to directly serve HTTP requests from users from different remote addresses, you can use a setting of `ip`. However, if you are using HarperDB behind a proxy server or application server, all the remote ip addresses will be the same and HarperDB will effectively only run on a single thread. Alternately, you can specify a header to use for identification. If you are using basic authentication, you could use the "Authorization" header to route requests to threads by the user's credentials. If you have another header that uniquely identifies users/clients, you can use that as the value of sessionAffinity. But be careful to ensure that the value does provide sufficient uniqueness and that requests are effectively distributed to all the threads, fully utilizing all your CPU cores. + +```yaml +http: + sessionAffinity: ip +``` + +`compressionThreshold` - _Type_: number; _Default_: 1200 (bytes) + +For HTTP clients that support (Brotli) compression encoding, responses that are larger than this threshold will be compressed (also note that for clients that accept compression, any streaming responses from queries are compressed as well, since the size is not known beforehand). + +```yaml +http: + compressionThreshold: 1200 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of allowable domains with CORS. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limit the amount of time the parser will wait to receive the complete HTTP headers. + +`keepAliveTimeout` - _Type_: integer; _Default_: 30,000 milliseconds (30 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9926 + +The port used to access the component server. + +`securePort` - _Type_: integer; _Default_: null + +The port the HarperDB component server uses for HTTPS connections. This requires a valid certificate and key. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout. + +```yaml +http: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + https: false + keepAliveTimeout: 30000 + port: 9926 + securePort: null + timeout: 120000 +``` + +*** + +### `threads` + +`threads` - _Type_: number; _Default_: One less than the number of logical cores/processors + +The `threads` option specifies the number of threads that will be used to service the HTTP requests for the operations API and custom functions. Generally, this should be close to the number of CPU logical cores/processors to ensure the CPU is fully utilized (a little less because HarperDB does have other threads at work), assuming HarperDB is the main service on a server. + +```yaml +threads: 11 +``` + +*** + +### `clustering` + +The `clustering` section configures the clustering engine, which is used to replicate data between instances of HarperDB. + +Clustering offers a lot of different configurations; however, in a majority of cases the only options you will need to pay attention to are: + +* `clustering.enabled` Enable the clustering processes. +* `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes. +* `clustering.hubServer.cluster.network.routes` The connections to other instances.
+* `clustering.nodeName` The name of your node, must be unique within the cluster. +* `clustering.user` The name of the user credentials used for inter-node authentication. + +`enabled` - _Type_: boolean; _Default_: false + +Enable clustering. + +_Note: If you enable clustering but do not create and add a cluster user you will get a validation error. See the `user` description below on how to add a cluster user._ + +```yaml +clustering: + enabled: true +``` + +`clustering.hubServer.cluster` + +Clustering’s `hubServer` facilitates the HarperDB mesh network and discovery service. + +```yaml +clustering: + hubServer: + cluster: + name: harperdb + network: + port: 9932 + routes: + - host: 3.62.184.22 + port: 9932 + - host: 3.735.184.8 + port: 9932 +``` + +`name` - _Type_: string, _Default_: harperdb + +The name of your cluster. This name needs to be consistent for all other nodes intended to be meshed in the same network. + +`port` - _Type_: integer, _Default_: 9932 + +The port the hub server uses to accept cluster connections. + +`routes` - _Type_: array, _Default_: null + +An object array that represents the host and port this server will cluster to. Each object must have two properties, `port` and `host`. Multiple entries can be added to create network resiliency in the event one server is unavailable. Routes can be added, updated, and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance. + +`clustering.hubServer.leafNodes` + +```yaml +clustering: + hubServer: + leafNodes: + network: + port: 9931 +``` + +`port` - _Type_: integer; _Default_: 9931 + +The port the hub server uses to accept leaf server connections. + +`clustering.hubServer.network` + +```yaml +clustering: + hubServer: + network: + port: 9930 +``` + +`port` - _Type_: integer; _Default_: 9930 + +Use this port to connect a client to the hub server, for example using the NATS SDK to interact with the server. + +`clustering.leafServer` + +Manages streams; streams are ‘message stores’ that store table transactions. + +```yaml +clustering: + leafServer: + network: + port: 9940 + routes: + - host: 3.62.184.22 + port: 9931 + - host: node3.example.com + port: 9931 + streams: + maxAge: 3600 + maxBytes: 10000000 + maxMsgs: 500 + path: /user/hdb/clustering/leaf +``` + +`port` - _Type_: integer; _Default_: 9940 + +Use this port to connect a client to the leaf server, for example using the NATS SDK to interact with the server. + +`routes` - _Type_: array; _Default_: null + +An object array that represents the host and port the leaf node will directly connect with. Each object must have two properties, `port` and `host`. Unlike the hub server, the leaf server will establish connections to all listed hosts. Routes can be added, updated, and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with.
This is likely going to be the `clustering.hubServer.leafNodes.network.port` on the remote instance (9931 in the example above). + +`clustering.leafServer.streams` + +`maxAge` - _Type_: integer; _Default_: null + +The maximum age of any messages in the stream, expressed in seconds. + +`maxBytes` - _Type_: integer; _Default_: null + +The maximum size of the stream in bytes. Oldest messages are removed if the stream exceeds this size. + +`maxMsgs` - _Type_: integer; _Default_: null + +How many messages may be in a stream. Oldest messages are removed if the stream exceeds this number. + +`path` - _Type_: string; _Default_: `<ROOTPATH>/clustering/leaf` + +The directory where all the streams are kept. + +*** + +`logLevel` - _Type_: string; _Default_: error + +Control the verbosity of clustering logs. + +```yaml +clustering: + logLevel: error +``` + +There exists a log level hierarchy, in order: `trace`, `debug`, `info`, `warn`, and `error`. When the level is set to `trace`, logs will be created for all possible levels, whereas if the level is set to `warn`, the only entries logged will be `warn` and `error`. The default value is `error`. + +`nodeName` - _Type_: string; _Default_: null + +The name of this node in your HarperDB cluster topology. This must be a value unique from the rest of the cluster node names. + +_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._ + +```yaml +clustering: + nodeName: great_node +``` + +`tls` + +Transport Layer Security default values are automatically generated on install. + +```yaml +clustering: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem + insecure: true + verify: true +``` + +`certificate` - _Type_: string; _Default_: `<ROOTPATH>/keys/certificate.pem` + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: `<ROOTPATH>/keys/ca.pem` + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: `<ROOTPATH>/keys/privateKey.pem` + +Path to the private key file. + +`insecure` - _Type_: boolean; _Default_: true + +When true, will skip certificate verification. For use only with self-signed certs. + +`republishMessages` - _Type_: boolean; _Default_: false + +When true, all transactions that are received from other nodes are republished to this node's stream. When subscriptions are not fully connected between all nodes, this ensures that messages are routed to all nodes through intermediate nodes. This also ensures that all writes, whether local or remote, are written to the NATS transaction log. However, there is additional overhead with republishing, and setting this to false can provide better data replication performance. When false, you need to ensure all subscriptions are fully connected from every node to every other node, and be aware that the NATS transaction log will only consist of local writes. + +`verify` - _Type_: boolean; _Default_: true + +When true, the hub server will verify the client certificate using the CA certificate. + +*** + +`user` - _Type_: string; _Default_: null + +The username given to the `cluster_user`. All instances in a cluster must use the same clustering user credentials (matching username and password). + +Inter-node authentication takes place via a special HarperDB user role type called `cluster_user`.
+ +The user can be created either through the API using an `add_user` request with the role set to `cluster_user`, or on install using environment variables (`CLUSTERING_USER=cluster_person`, `CLUSTERING_PASSWORD=pass123!`) or CLI variables (`harperdb --CLUSTERING_USER cluster_person --CLUSTERING_PASSWORD pass123!`). + +```yaml +clustering: + user: cluster_person +``` + +*** + +### `localStudio` + +The `localStudio` section configures the local HarperDB Studio, a simplified GUI for HarperDB hosted on the server. A more comprehensive GUI is hosted by HarperDB at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or HarperDB Studio is made directly from your browser to the instance. + +`enabled` - _Type_: boolean; _Default_: false + +Enables or disables the local Studio. + +```yaml +localStudio: + enabled: false +``` + +*** + +### `logging` + +The `logging` section configures HarperDB logging across all HarperDB functionality. This includes standard text logging of application and database events as well as structured data logs of record changes. Application/database events are logged in text format to the `~/hdb/log/hdb.log` file (or location specified by `logging.root`). + +In addition, structured logging of data changes is also available: + +`auditLog` - _Type_: boolean; _Default_: false + +Enables table transaction logging. + +```yaml +logging: + auditLog: false +``` + +To access the audit logs, use the API operation `read_audit_log`. It will provide a history of the data, including original records and changes made, in a specified table. + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +`file` - _Type_: boolean; _Default_: true + +Defines whether or not to log to a file. + +```yaml +logging: + file: true +``` + +`auditRetention` - _Type_: string|number; _Default_: 3d + +This specifies how long audit logs should be retained. + +`level` - _Type_: string; _Default_: error + +Control the verbosity of text event logs. + +```yaml +logging: + level: error +``` + +There exists a log level hierarchy, in order: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. When the level is set to `trace`, logs will be created for all possible levels, whereas if the level is set to `fatal`, the only entries logged will be `fatal` and `notify`. The default value is `error`. + +`root` - _Type_: string; _Default_: `<ROOTPATH>/log` + +The path where the log files will be written. + +```yaml +logging: + root: ~/hdb/log +``` + +`rotation` + +Rotation provides the ability for a user to systematically rotate and archive the `hdb.log` file. To enable rotation, `interval` and/or `maxSize` must be set. + +_**Note:**_ `interval` and `maxSize` are approximates only. It is possible that the log file will exceed these values slightly before it is rotated. + +```yaml +logging: + rotation: + enabled: true + compress: false + interval: 1D + maxSize: 100K + path: /user/hdb/log +``` + +`enabled` - _Type_: boolean; _Default_: false + +Enables logging rotation. + +`compress` - _Type_: boolean; _Default_: false + +Enables compression via gzip when logs are rotated. + +`interval` - _Type_: string; _Default_: null + +The time that should elapse between rotations. Acceptable units are D(ays), H(ours), or M(inutes). + +`maxSize` - _Type_: string; _Default_: null + +The maximum size the log file can reach before it is rotated. Must use units M(egabyte), G(igabyte), or K(ilobyte). + +`path` - _Type_: string; _Default_: `<ROOTPATH>/log`
File naming convention is `HDB-YYYY-MM-DDT-HH-MM-SSSZ.log`. + +`stdStreams` - _Type_: boolean; _Default_: false + +Write HarperDB logs to the standard output and error streams. + +```yaml +logging: + stdStreams: false +``` + +*** + +### `authentication` + +The authentication section defines the configuration for the default authentication mechanism in HarperDB. + +```yaml +authentication: + authorizeLocal: true + cacheTTL: 30000 + enableSessions: true + operationTokenTimeout: 1d + refreshTokenTimeout: 30d +``` + +`authorizeLocal` - _Type_: boolean; _Default_: true + +This will automatically authorize any requests from the loopback IP address as the superuser. This should be disabled for any HarperDB servers that may be accessed by untrusted users from the same instance. For example, this should be disabled if you are using a local proxy, or for general server hardening. + +`cacheTTL` - _Type_: number; _Default_: 30000 + +This defines the length of time (in milliseconds) that an authentication (a particular Authorization header or token) can be cached. + +`enableSessions` - _Type_: boolean; _Default_: true + +This will enable cookie-based sessions to maintain an authenticated session. This is generally the preferred mechanism for maintaining authentication in web browsers as it allows cookies to hold an authentication token securely without giving JavaScript code access to token/credentials that may open up XSS vulnerabilities. + +`operationTokenTimeout` - _Type_: string; _Default_: 1d + +Defines the length of time an operation token will be valid until it expires. Example values: https://github.com/vercel/ms. + +`refreshTokenTimeout` - _Type_: string; _Default_: 30d + +Defines the length of time a refresh token will be valid until it expires. Example values: https://github.com/vercel/ms. + +### `operationsApi` + +The `operationsApi` section configures the HarperDB Operations API.\ +All the `operationsApi` configuration is optional. Any configuration that is not provided under this section will default to the `http` configuration section. + +`network` + +```yaml +operationsApi: + network: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + keepAliveTimeout: 5000 + port: 9925 + securePort: null + timeout: 120000 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of domains allowed for CORS requests. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limits the amount of time the parser will wait to receive the complete HTTP headers. + +`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9925 + +The port the HarperDB operations API interface will listen on. + +`securePort` - _Type_: integer; _Default_: null + +The port the HarperDB operations API uses for HTTPS connections. This requires a valid certificate and key. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout. + +`tls` + +This configures the Transport Layer Security for HTTPS support.
+ +```yaml +operationsApi: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: \/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: \/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: \/keys/privateKey.pem + +Path to the private key file. + +*** + +#### `componentsRoot` + +`componentsRoot` - _Type_: string; _Default_: \/components + +The path to the folder containing the local component files. + +```yaml +componentsRoot: ~/hdb/components +``` + +*** + +#### `rootPath` + +`rootPath` - _Type_: string; _Default_: home directory of the current user + +The HarperDB database and applications/API/interface are decoupled from each other. The `rootPath` directory specifies where the HarperDB application persists data, config, logs, and Custom Functions. + +```yaml +rootPath: /Users/jonsnow/hdb +``` + +*** + +#### `storage` + +`writeAsync` - _Type_: boolean; _Default_: false + +The `writeAsync` option turns off disk flushing/syncing, allowing for faster write operation throughput. However, this does not provide storage integrity guarantees, and if a server crashes, it is possible that there may be data loss requiring a restore from a backup or another node. + +```yaml +storage: + writeAsync: false +``` + +`caching` - _Type_: boolean; _Default_: true + +The `caching` option enables in-memory caching of records, providing faster access to frequently accessed objects. This can incur some extra overhead for situations where reads are extremely random and don't benefit from caching. + +```yaml +storage: + caching: true +``` + +`compression` - _Type_: boolean; _Default_: false + +The `compression` option enables compression of records in the database. This can be helpful for very large databases in reducing storage requirements and potentially allowing more data to be cached. This uses the very fast LZ4 compression algorithm, but this still incurs extra costs for compressing and decompressing. + +```yaml +storage: + compression: false +``` + +`noReadAhead` - _Type_: boolean; _Default_: true + +The `noReadAhead` option advises the operating system to not read ahead when reading from the database. This provides better memory utilization, except in situations where large records or frequent range queries are used. + +```yaml +storage: + noReadAhead: true +``` + +`prefetchWrites` - _Type_: boolean; _Default_: true + +The `prefetchWrites` option loads data prior to write transactions. This should be enabled for databases that are larger than memory (although it can be faster to disable this for smaller databases). + +```yaml +storage: + prefetchWrites: true +``` + +`path` - _Type_: string; _Default_: `/schema` + +The `path` configuration sets where all database files should reside. + +```yaml +storage: + path: /users/harperdb/storage +``` + +_**Note:**_ This configuration applies to all database files, which includes system tables that are used internally by HarperDB. For this reason, if you wish to use a non-default `path` value you must move any existing schemas into your `path` location. Your existing schemas will likely include the system schema, which can be found at `/schema/system`.
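+ +Putting these options together, a complete `storage` section might look like the following sketch (the values shown are the defaults described above, plus an illustrative custom path): + +```yaml +storage: + writeAsync: false + caching: true + compression: false + noReadAhead: true + prefetchWrites: true + path: /users/harperdb/storage +```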
+ +*** + +#### `tls` + +Transport Layer Security + +```yaml +tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: \/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: \/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: \/keys/privateKey.pem + +Path to the private key file. + +*** + +#### `databases` + +The `databases` section is an optional configuration that can be used to define where database files should reside down to the table level.\ +\ +This configuration should be set before the database and table have been created.\ +\ +The configuration will not create the directories in the path; they must be created by the user.\ + + +To define where a database and all its tables should reside, use the name of your database and the `path` parameter. + +```yaml +databases: + nameOfDatabase: + path: /path/to/database +``` + +To define where specific tables within a database should reside, use the name of your database, the `tables` parameter, the name of your table, and the `path` parameter. + +```yaml +databases: + nameOfDatabase: + tables: + nameOfTable: + path: /path/to/table +``` + +This same pattern can be used to define where the audit log database files should reside. To do this, use the `auditPath` parameter. + +```yaml +databases: + nameOfDatabase: + auditPath: /path/to/database +``` + +\ + + +**Setting the database section through the command line, environment variables, or API** + +When using command line variables, environment variables, or the API to configure the `databases` section, a slightly different convention from the regular one should be used. To add one or more configurations, use a JSON object array. + +Using command line variables: + +```bash +--DATABASES [{\"nameOfSchema\":{\"tables\":{\"nameOfTable\":{\"path\":\"\/path\/to\/table\"}}}}] +``` + +Using environment variables: + +```bash +DATABASES=[{"nameOfSchema":{"tables":{"nameOfTable":{"path":"/path/to/table"}}}}] +``` + +Using the API: + +```json +{ + "operation": "set_configuration", + "databases": [{ + "nameOfDatabase": { + "tables": { + "nameOfTable": { + "path": "/path/to/table" + } + } + } + }] +} +``` diff --git a/site/versioned_docs/version-4.2/deployments/harperdb-cli.md b/site/versioned_docs/version-4.2/deployments/harperdb-cli.md new file mode 100644 index 00000000..333d9979 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/harperdb-cli.md @@ -0,0 +1,95 @@ +--- +title: HarperDB CLI +--- + +# HarperDB CLI + +The HarperDB command line interface (CLI) is used to administer [self-installed HarperDB instances](./install-harperdb/). + +## Installing HarperDB + +To install HarperDB with CLI prompts, run the following command: + +```bash +harperdb install +``` + +Alternatively, HarperDB installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](./configuration#using-the-configuration-file-and-naming-conventions). Note, when used in conjunction, command line arguments will override environment variables.
+ +#### Environment Variables + +```bash +#minimum required parameters for no additional CLI prompts +export TC_AGREEMENT=yes +export HDB_ADMIN_USERNAME=HDB_ADMIN +export HDB_ADMIN_PASSWORD=password +export ROOTPATH=/tmp/hdb/ +export OPERATIONSAPI_NETWORK_PORT=9925 +harperdb install +``` + +#### Command Line Arguments + +```bash +#minimum required parameters for no additional CLI prompts +harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925 +``` + +*** + +## Starting HarperDB + +To start HarperDB after it is installed, run the following command: + +```bash +harperdb start +``` + +*** + +## Stopping HarperDB + +To stop HarperDB once it is running, run the following command: + +```bash +harperdb stop +``` + +*** + +## Restarting HarperDB + +To restart HarperDB once it is running, run the following command: + +```bash +harperdb restart +``` + +*** + +## Getting the HarperDB Version + +To check the version of HarperDB that is installed, run the following command: + +```bash +harperdb version +``` + +## Get all available CLI commands + +To display all available HarperDB CLI commands along with a brief description, run: + +```bash +harperdb help +``` + +## Get the status of HarperDB and clustering + +To display the status of the HarperDB process, the clustering hub and leaf processes, the clustering network, and replication statuses, run: + +```bash +harperdb status +``` + +## Backups + +HarperDB uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that HarperDB maintains database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a HarperDB database. Database files are stored in the hdb/database directory. As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the database directory to restore a previous backup (with HarperDB shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot, and this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use. diff --git a/site/versioned_docs/version-4.2/deployments/harperdb-cloud/alarms.md b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/alarms.md new file mode 100644 index 00000000..03526fa8 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/alarms.md @@ -0,0 +1,20 @@ +--- +title: Alarms +--- + +# Alarms + +HarperDB Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered, organization owners will immediately receive an email alert and the alert will be available on the [Instance Configuration](../../administration/harperdb-studio/instance-configuration) page. The table below describes each alert and its evaluation metrics. + +### Heading Definitions + +* **Alarm**: Title of the alarm. +* **Threshold**: Definition of the alarm threshold. +* **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over. +* **Proposed Remedy**: Recommended solution to avoid the alert in the future.
+ +| Alarm | Threshold | Intervals | Proposed Remedy | +| ------- | ---------- | --------- | -------------------------------------------------------------------------------------------------------------------------------- | +| Storage | > 90% Disk | 1 x 5min | [Increase storage volume](../../administration/harperdb-studio/instance-configuration#update-instance-storage) | +| CPU | > 90% Avg | 2 x 5min | [Increase instance size for additional CPUs](../../administration/harperdb-studio/instance-configuration#update-instance-ram) | +| Memory | > 90% RAM | 2 x 5min | [Increase instance size](../../administration/harperdb-studio/instance-configuration#update-instance-ram) | diff --git a/site/versioned_docs/version-4.2/deployments/harperdb-cloud/index.md b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/index.md new file mode 100644 index 00000000..ae2ec1a7 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/index.md @@ -0,0 +1,9 @@ +--- +title: HarperDB Cloud +--- + +# HarperDB Cloud + +[HarperDB Cloud](https://studio.harperdb.io/) is the easiest way to test drive HarperDB; it’s HarperDB-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. HarperDB Cloud is currently powered by AWS, with additional cloud providers on our roadmap for the future. + +You can create a new [HarperDB Cloud instance in the HarperDB Studio](../../administration/harperdb-studio/instances#create-a-new-instance). diff --git a/site/versioned_docs/version-4.2/deployments/harperdb-cloud/instance-size-hardware-specs.md b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/instance-size-hardware-specs.md new file mode 100644 index 00000000..0e970b13 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/instance-size-hardware-specs.md @@ -0,0 +1,23 @@ +--- +title: Instance Size Hardware Specs +--- + +# Instance Size Hardware Specs + +While HarperDB Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*. + +| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor | +| --------------------- | --------- | ------- | -------------- | -------------------------------------- | +| t3.micro | 1 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.small | 2 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.medium | 4 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| m5.large | 8 | 2 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.xlarge | 16 | 4 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.2xlarge | 32 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.4xlarge | 64 | 16 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.8xlarge | 128 | 32 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.12xlarge | 192 | 48 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.16xlarge | 256 | 64 | 20 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.24xlarge | 384 | 96 | 25 | Up to 3.1 GHz Intel Xeon Platinum 8000 | + +\*Specifications are subject to change. For the most up-to-date information, please refer to AWS documentation: [https://aws.amazon.com/ec2/instance-types/](https://aws.amazon.com/ec2/instance-types/).
diff --git a/site/versioned_docs/version-4.2/deployments/harperdb-cloud/iops-impact.md b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/iops-impact.md new file mode 100644 index 00000000..1c8496d5 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/iops-impact.md @@ -0,0 +1,42 @@ +--- +title: IOPS Impact on Performance +--- + +# IOPS Impact on Performance + +HarperDB, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of a server, virtual machine, or container running HarperDB. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that HarperDB performs as expected. + +## IOPS Overview + +The primary measure of storage performance is the number of input/output operations per second (IOPS) that a storage device can perform. Different storage devices can have dramatically different performance profiles. A hard drive (HDD) might only perform a hundred or so IOPS, while a solid state drive (SSD) might be able to perform tens or hundreds of thousands of IOPS. + +Cloud providers like AWS, which powers HarperDB Cloud, don’t typically attach individual disks to a virtual machine or container. Instead, they combine large numbers of storage drives to create very high performance storage servers. Chunks (volumes) of that storage are then carved out and presented to many different virtual machines and containers. Due to the shared nature of this type of storage, the cloud provider places configurable limits on the number of IOPS that a volume can perform. The same way that cloud providers charge more for larger capacity volumes, they also charge more for volumes with more IOPS. + +## HarperDB Cloud Storage + +HarperDB Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price. + +AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS; as a result, all HarperDB Cloud storage options will offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future. + +You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html. + +## Estimating IOPS for a HarperDB Instance + +The number of IOPS required for a particular workload is influenced by many factors. Testing your particular application is the best way to determine the number of IOPS required. A reliable method is to estimate about two IOPS for every index, including the primary key itself. So if a table has two indexes besides the primary key, estimate that an insert or update will require about six IOPS. Note that this can often be closer to one IOPS per index under load due to internal batching of writes, and sometimes even better when doing sequential inserts. Again, it is best to test to verify this with application-specific data and write patterns. + +For assistance in estimating IOPS requirements, feel free to contact HarperDB Support or join our Community Slack Channel. + +## Example Use Case IOPS Requirements + +* **Sensor Data Collection** + + In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level.
For example, if you are collecting 100 records per second, you would expect to need roughly 3,000 IOPS just to handle the data inserts. +* **Data Analytics/BI Server** + + Providing a server for analytics purposes typically requires a larger machine. Typically, these cases involve large-scale SQL joins and aggregations, which puts a large strain on reads. HarperDB utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs to have high IOPS to meet performance demand. +* **Web Services** + + Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any HarperDB operation that requires a data scan will be IOPS intensive, but if these are not frequent then the EBS boost will suffice. Queries utilizing equals operations in either SQL or NoSQL do not require a scan due to HarperDB’s native indexing. +* **High Performance Database** + + Ultimately, if performance is your top priority, HarperDB should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements. diff --git a/site/versioned_docs/version-4.2/deployments/harperdb-cloud/verizon-5g-wavelength-instances.md b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/verizon-5g-wavelength-instances.md new file mode 100644 index 00000000..c5a565e9 --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/harperdb-cloud/verizon-5g-wavelength-instances.md @@ -0,0 +1,31 @@ +--- +title: Verizon 5G Wavelength +--- + +# Verizon 5G Wavelength + +These instances are only accessible from the Verizon network. When accessing your HarperDB instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, and Verizon mobile devices. + +HarperDB on Verizon 5G Wavelength brings HarperDB closer to the end user exclusively on the Verizon network, resulting in as little as single-digit millisecond response time from HarperDB to the client. + +Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/). + +## HarperDB 5G Wavelength Instance Specs + +While HarperDB 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*. + +| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor | +| --------------------- | --------- | ------- | -------------- | ------------------------------------------- | +| t3.medium | 4 | 2 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor | +| t3.xlarge | 16 | 4 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor | +| r5.2xlarge | 64 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum Processor | + +\*Specifications are subject to change. For the most up-to-date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/). + +## HarperDB 5G Wavelength Storage + +HarperDB 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp2) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.
+ +AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS a volume can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time. + +Smaller gp2 volumes are perfect for trying out the functionality of HarperDB, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger HarperDB volume. Learn more about the [impact of IOPS on performance here](./iops-impact). + +You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#ebsvolumetypes\_gp2). diff --git a/site/versioned_docs/version-4.2/deployments/install-harperdb/index.md b/site/versioned_docs/version-4.2/deployments/install-harperdb/index.md new file mode 100644 index 00000000..32a23e0b --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/install-harperdb/index.md @@ -0,0 +1,63 @@ +--- +title: Install HarperDB +--- + +# Install HarperDB + +## Install HarperDB + +This documentation contains information for installing HarperDB locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with HarperDB Cloud](https://studio.harperdb.io/sign-up). HarperDB is a cross-platform database; we recommend Linux for production use, but HarperDB can run on Windows and Mac as well for development purposes. Installation is usually very simple and just takes a few steps, but there are a few different options documented here. + +HarperDB runs on Node.js, so if you do not have it installed, you need to do that first (if you have it installed, you can skip to installing HarperDB itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally, NVM can be installed with the following command: + +```bash +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash +``` + +Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but we support all currently maintained Node versions (currently version 14 and newer; make sure to always use the latest minor/patch release for the major version): + +```bash +nvm install --lts +``` + +#### Install and Start HarperDB + +Then you can install HarperDB with NPM and start it: + +```bash +npm install -g harperdb +harperdb +``` + +HarperDB will automatically start after installation. + +If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system for use as a database server in our linux installation guide](./linux). + +## With Docker + +If you would like to run HarperDB in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server. + +Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a HarperDB container.
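+ +As a quick orientation, a minimal `docker run` invocation might look like the following sketch; the environment variables mirror the install parameters described elsewhere in these docs, but the container volume path and port mappings here are assumptions, so consult the Docker Hub page for the authoritative options: + +```bash +# A hypothetical example; see the Docker Hub page for authoritative flags +docker run -d \ + -v $(pwd)/hdb:/home/harperdb/hdb \ + -e TC_AGREEMENT=yes \ + -e HDB_ADMIN_USERNAME=HDB_ADMIN \ + -e HDB_ADMIN_PASSWORD=password \ + -p 9925:9925 -p 9926:9926 \ + harperdb/harperdb +```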
+ +## Offline Install + +If you need to install HarperDB on a device that doesn't have an Internet connection, you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and NPM): + +[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) + +Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it: + +```bash +npm install -g harperdb-X.X.X.tgz +harperdb install +``` + +For more information visit the [HarperDB Command Line Interface](../harperdb-cli) guide. + +## Installation on Less Common Platforms + +HarperDB comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including: + +* [Go](https://go.dev/dl/): version 1.19.1 +* GCC +* Make +* Python v3.7, v3.8, v3.9, or v3.10 diff --git a/site/versioned_docs/version-4.2/deployments/install-harperdb/linux.md b/site/versioned_docs/version-4.2/deployments/install-harperdb/linux.md new file mode 100644 index 00000000..bf02830e --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/install-harperdb/linux.md @@ -0,0 +1,212 @@ +--- +title: On Linux +--- + +# On Linux + +If you wish to install locally or already have a configured server, see the basic [Installation Guide](./). + +The following is a recommended way to configure Linux and install HarperDB. These instructions should work reasonably well for any public cloud or on-premises Linux instance. + +*** + +These instructions assume that the following has already been completed: + +1. Linux is installed +1. Basic networking is configured +1. A non-root user account dedicated to HarperDB with sudo privileges exists +1. An additional volume for storing HarperDB files is attached to the Linux instance +1. Traffic to ports 9925 (HarperDB Operations API), 9926 (HarperDB Application Interface), and 9932 (HarperDB Clustering) is permitted + +While you will need to access HarperDB through port 9925 for administration through the operations API, and port 9932 for clustering, for a higher level of security you may want to consider keeping both of these ports restricted to a VPN or VPC, and only have the application interface (9926 by default) exposed to the public Internet. + +For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 Instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account. + +*** + +### (Optional) LVM Configuration + +Logical Volume Manager (LVM) can be used to stripe multiple disks together to form a single logical volume. If striping disks together is not a requirement, skip these steps.
+ +Find the disk that already has a partition: + +```bash +used_disk=$(lsblk -P -I 259 | grep "nvme.n1.*part" | grep -o "nvme.n1") +``` + +Create an array of free disks: + +```bash +declare -a free_disks +mapfile -t free_disks < <(lsblk -P -I 259 | grep "nvme.n1.*disk" | grep -o "nvme.n1" | grep -v "$used_disk") +``` + +Get the quantity of free disks: + +```bash +free_disks_qty=${#free_disks[@]} +``` + +Construct the pvcreate command: + +```bash +cmd_string="" +for i in "${free_disks[@]}" +do +cmd_string="$cmd_string /dev/$i" +done +``` + +Initialize the disks for use by LVM: + +```bash +pvcreate_cmd="pvcreate $cmd_string" +sudo $pvcreate_cmd +``` + +Create the volume group: + +```bash +vgcreate_cmd="vgcreate hdb_vg $cmd_string" +sudo $vgcreate_cmd +``` + +Create the logical volume: + +```bash +sudo lvcreate -n hdb_lv -i $free_disks_qty -l 100%FREE hdb_vg +``` + +### Configure Data Volume + +Run `lsblk` and note the device name of the additional volume: + +```bash +lsblk +``` + +Create an ext4 filesystem on the volume (the below commands assume the device name is nvme1n1; if you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb\_vg/hdb\_lv): + +```bash +sudo mkfs.ext4 -L hdb_data /dev/nvme1n1 +``` + +Mount the file system and set the correct permissions for the directory: + +```bash +mkdir /home/ubuntu/hdb +sudo mount -t ext4 /dev/nvme1n1 /home/ubuntu/hdb +sudo chown -R ubuntu:ubuntu /home/ubuntu/hdb +sudo chmod 775 /home/ubuntu/hdb +``` + +Create an fstab entry to mount the filesystem on boot: + +```bash +echo "LABEL=hdb_data /home/ubuntu/hdb ext4 defaults,noatime 0 1" | sudo tee -a /etc/fstab +``` + +### Configure Linux and Install Prerequisites + +If a swap file or partition does not already exist, create and enable a 2GB swap file: + +```bash +sudo dd if=/dev/zero of=/swapfile bs=128M count=16 +sudo chmod 600 /swapfile +sudo mkswap /swapfile +sudo swapon /swapfile +echo "/swapfile swap swap defaults 0 0" | sudo tee -a /etc/fstab +``` + +Increase the open file limits for the ubuntu user: + +```bash +echo "ubuntu soft nofile 500000" | sudo tee -a /etc/security/limits.conf +echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf +``` + +Install Node Version Manager (nvm): + +```bash +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash +``` + +Load nvm (or log out and then log in): + +```bash +. ~/.nvm/nvm.sh +``` + +Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites)): + +```bash +nvm install +``` + +### Install and Start HarperDB + +Here is an example of installing HarperDB with minimal configuration. + +```bash +npm install -g harperdb +harperdb start \ + --TC_AGREEMENT "yes" \ + --ROOTPATH "/home/ubuntu/hdb" \ + --OPERATIONSAPI_NETWORK_PORT "9925" \ + --HDB_ADMIN_USERNAME "HDB_ADMIN" \ + --HDB_ADMIN_PASSWORD "password" +``` + +Here is an example of installing HarperDB with commonly used additional configuration. + +```bash +npm install -g harperdb +harperdb start \ + --TC_AGREEMENT "yes" \ + --ROOTPATH "/home/ubuntu/hdb" \ + --OPERATIONSAPI_NETWORK_PORT "9925" \ + --HDB_ADMIN_USERNAME "HDB_ADMIN" \ + --HDB_ADMIN_PASSWORD "password" \ + --HTTP_SECUREPORT "9926" \ + --CLUSTERING_ENABLED "true" \ + --CLUSTERING_USER "cluster_user" \ + --CLUSTERING_PASSWORD "password" \ + --CLUSTERING_NODENAME "hdb1" +``` + +HarperDB will automatically start after installation.
If you wish HarperDB to start when the OS boots, you have two options: + +You can set up a crontab: + +```bash +(crontab -l 2>/dev/null; echo "@reboot PATH=\"/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH\" && harperdb start") | crontab - +``` + +Or you can create a systemd script at `/etc/systemd/system/harperdb.service`, pasting the following contents into the file: + +``` +[Unit] +Description=HarperDB + +[Service] +Type=simple +Restart=always +User=ubuntu +Group=ubuntu +WorkingDirectory=/home/ubuntu +ExecStart=/bin/bash -c 'PATH="/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH"; harperdb' + +[Install] +WantedBy=multi-user.target +``` + +And then run the following: + +``` +systemctl daemon-reload +systemctl enable harperdb +``` + +For more information visit the [HarperDB Command Line Interface guide](../../deployments/harperdb-cli) and the [HarperDB Configuration File guide](../../deployments/configuration). diff --git a/site/versioned_docs/version-4.2/deployments/upgrade-hdb-instance.md b/site/versioned_docs/version-4.2/deployments/upgrade-hdb-instance.md new file mode 100644 index 00000000..0b7c6e3f --- /dev/null +++ b/site/versioned_docs/version-4.2/deployments/upgrade-hdb-instance.md @@ -0,0 +1,90 @@ +--- +title: Upgrade a HarperDB Instance +--- + +# Upgrade a HarperDB Instance + +This document describes best practices for upgrading self-hosted HarperDB instances. HarperDB can be upgraded using a combination of npm and built-in HarperDB upgrade scripts. Whenever upgrading your HarperDB installation, it is recommended that you make a backup of your data first. Note: This document applies to self-hosted HarperDB instances only. All [HarperDB Cloud instances](./harperdb-cloud/) will be upgraded by the HarperDB Cloud team. + +## Upgrading + +Upgrading HarperDB is a two-step process. First, the latest version of HarperDB must be downloaded from npm; then the HarperDB upgrade scripts will be utilized to ensure the newest features are available on the system. + +1. Install the latest version of HarperDB using `npm install -g harperdb`. + + Note `-g` should only be used if you installed HarperDB globally (which is recommended). +1. Run `harperdb` to initiate the upgrade process. + + HarperDB will then prompt you for all appropriate inputs and then run the upgrade directives. + +## Node Version Manager (nvm) + +[Node Version Manager (nvm)](https://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found here: https://nvm.sh/. + +HarperDB supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command: + +```bash +nvm install +``` + +To switch to a version of Node, run: + +```bash +nvm use +``` + +To see the current running version of Node, run: + +```bash +node --version +``` + +With a handful of different versions of Node.js installed, run nvm with the `ls` argument to list out all installed versions: + +```bash +nvm ls +``` + +When upgrading HarperDB, we recommend also upgrading your Node version.
Here we assume you're running on an older version of Node; the execution may look like this: + +Switch to the older version of Node that HarperDB is running on (if it is not the current version): + +```bash +nvm use 14.19.0 +``` + +Make sure HarperDB is not running: + +```bash +harperdb stop +``` + +Uninstall HarperDB. Note, this step is not required, but it will clean up old HarperDB artifacts. We recommend removing all other HarperDB installations to ensure the most recent version is always running. + +```bash +npm uninstall -g harperdb +``` + +Switch to the newer version of Node: + +```bash +nvm use +``` + +Install HarperDB globally: + +```bash +npm install -g harperdb +``` + +Run the upgrade script: + +```bash +harperdb +``` + +Start HarperDB: + +```bash +harperdb start +``` diff --git a/site/versioned_docs/version-4.2/developers/_category_.json b/site/versioned_docs/version-4.2/developers/_category_.json new file mode 100644 index 00000000..9fe399bf --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Developers", + "position": 1, + "link": { + "type": "generated-index", + "title": "Developers Documentation", + "description": "Comprehensive guides and references for building applications with HarperDB", + "keywords": [ + "developers" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/developers/applications/caching.md b/site/versioned_docs/version-4.2/developers/applications/caching.md new file mode 100644 index 00000000..7f228b5e --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/applications/caching.md @@ -0,0 +1,273 @@ +--- +title: Caching +--- + +# Caching + +HarperDB has integrated support for caching data. With built-in caching capabilities and distributed high-performance low-latency responsiveness, HarperDB makes an ideal data caching server. HarperDB can store cached data as queryable structured data, so data can easily be consumed in one format (for example JSON or CSV) and provided to end users in different formats with different selected properties (for example MessagePack, with a subset of selected properties), or even with customized querying capabilities. HarperDB also manages and provides timestamps/tags for proper caching control, facilitating further downstream caching. With these combined capabilities, HarperDB is an extremely fast, interoperable, flexible, and customizable caching server. + +## Configuring Caching + +To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](./defining-schemas)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`: + +```graphql +type MyCache @table(expiration: 3600) @export { + id: ID @primaryKey +} +``` + +You may also note that we can define a time-to-live (TTL) expiration on the table, indicating when table records/entries should expire. This is generally necessary for "passive" caches where there is no active notification of when entries expire. However, this is not needed if you provide a means of notifying when data is invalidated and changed. + +While you can provide a single expiration time, there are actually several expiration timings that are potentially relevant, and can be independently configured.
These settings are available as directive properties on the table configuration (like `expiration` above): + +* stale expiration: The point when a request for a record should trigger a request to origin (but might possibly return the current stale record depending on policy). +* must-revalidate expiration: The point when a request for a record must make a request to origin first and return the latest value from origin. +* eviction expiration: The point when a record is actually removed from the caching table. + +You can provide a single expiration and it defines the behavior for all three. You can also provide three settings for expiration, through table directives: + +* `expiration` - The amount of time until a record goes stale. +* `eviction` - The amount of time after expiration before a record can be evicted (defaults to zero). +* `scanInterval` - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction). + +## Define External Data Source + +Next, you need to define the source for your cache. External data sources could be HTTP APIs, other databases, microservices, or any other source of data. This can be defined as a resource class in your application's `resources.js` module. You can extend the `Resource` class (which is available as a global variable in the HarperDB environment) as your base class. The first method to implement is a `get()` method to define how to retrieve the source data. For example, if we were caching an external HTTP API, we might define it as such: + +```javascript +class ThirdPartyAPI extends Resource { + async get() { + return (await fetch(`http://some-api.com/${this.getId()}`)).json(); + } +} +``` + +Next, we define this external data resource as the "source" for the caching table we defined above: + +```javascript +const { MyCache } = tables; +MyCache.sourcedFrom(ThirdPartyAPI); +``` + +Now we have a fully configured and connected cache. If you access data from `MyCache` (for example, through the REST API, like `/MyCache/some-id`), HarperDB will check to see if the requested entry is in the table and return it if it is available (and hasn't expired). If there is no entry, or it has expired (it is older than one hour in this case), it will go to the source, calling the `get()` method, which will then retrieve the requested entry. Once the entry is retrieved, it will be saved/cached in the caching table (for one hour based on our expiration time). + +```mermaid +flowchart TD + Client1(Client 1)-->Cache(Caching Table) + Client2(Client 2)-->Cache + Cache-->Resource(Data Source Connector) + Resource-->API(Remote Data Source API) +``` + +HarperDB handles waiting for an existing cache resolution to finish and uses its result. This prevents a "cache stampede" when entries expire, ensuring that multiple requests to a cache entry will all wait on a single request to the data source. + +Cache tables with an expiration are periodically pruned for expired entries. Because this is done periodically, there is usually some amount of time between when a record has expired and when the record is actually evicted (the cached data is removed). But when a record is checked for availability, the expiration time is used to determine if the record is fresh (and the cache entry can be used). + +### Eviction with Indexing + +Eviction is the removal of a locally cached copy of data, but it does not imply the deletion of the actual data from the canonical or origin data source.
Because evicted records still exist (just not in the local cache), if a caching table uses expiration (and eviction), and has indexing on certain attributes, the data is not removed from the indexes. The indexes that reference the evicted record are preserved, along with the attribute data necessary to maintain these indexes. Therefore, eviction means the removal of non-indexed data (in this case evictions are stored as "partial" records). Eviction only removes the data that can be safely removed from a cache without affecting the integrity or behavior of the indexes. If a search query is performed that matches this evicted record, the record will be requested on-demand to fulfill the search query. + +### Specifying a Timestamp + +In the example above, we simply retrieved data to fulfill a cache request. We may want to supply the timestamp of the record we are fulfilling as well. This can be set on the context for the request: + +```javascript +class ThirdPartyAPI extends Resource { + async get() { + let response = await fetch(`http://some-api.com/${this.getId()}`); + this.getContext().lastModified = response.headers.get('Last-Modified'); + return response.json(); + } +} +``` + +#### Specifying an Expiration + +In addition, we can also specify when a cached record "expires". When a cached record expires, this means that a request for that record will trigger a request to the data source again. This does not necessarily mean that the cached record has been evicted (removed), although expired records will be periodically evicted. If the cached record still exists, the data source can revalidate it and return it. For example: + +```javascript +class ThirdPartyAPI extends Resource { + async get() { + const context = this.getContext(); + let headers = new Headers(); + if (context.replacingVersion) // this is the existing cached record + headers.set('If-Modified-Since', new Date(context.replacingVersion).toUTCString()); + let response = await fetch(`http://some-api.com/${this.getId()}`, { headers }); + let cacheInfo = response.headers.get('Cache-Control'); + let maxAge = cacheInfo?.match(/max-age=(\d+)/)?.[1]; + if (maxAge) // we can set a specific expiration time by setting context.expiresAt + context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time + // we can just revalidate and return the record if the origin has confirmed that it has the same version: + if (response.status === 304) return context.replacingRecord; + ... +``` + +## Active Caching and Invalidation + +The cache we have created above is a "passive" cache; it only pulls data from the data source as needed, and has no knowledge of if and when data from the data source has actually changed, so it must rely on timer-based expiration to periodically retrieve possibly updated data. This means that it is possible that the cache may have stale data for a while (if the underlying data has changed, but the cached data hasn't expired), and the cache may have to refresh more than necessary if the data source data hasn't changed. Consequently, it can be significantly more effective to implement an "active" cache, in which the data source is monitored and notifies the cache when any data changes. This ensures that when data changes, the cache can immediately load the updated data, and unchanged data can remain cached much longer (or indefinitely). + +### Invalidate + +One way to provide more active caching is to specifically invalidate individual records.
Invalidation is useful when you know the source data has changed, and the cache needs to re-retrieve data from the source the next time that record is accessed. This can be done by executing the `invalidate()` method on a resource. For example, you could extend a table (in your resources.js) and provide a custom POST handler that does invalidation: + +```javascript +const { MyTable } = tables; +export class MyTableEndpoint extends MyTable { + async post(data) { + if (data.invalidate) // use this flag as a marker + this.invalidate(); + } +} +``` + +(Note that if you are now exporting this endpoint through resources.js, you don't necessarily need to directly export the table separately in your schema.graphql). + +### Subscriptions + +We can provide more control of an active cache with subscriptions. If there is a way to receive notifications from the external data source of data changes, we can implement this data source as an "active" data source for our cache by implementing a `subscribe` method. A `subscribe` method should return an asynchronous iterable that yields events indicating the updates. One straightforward way of creating an asynchronous iterable is by defining the `subscribe` method as an asynchronous generator. If we had an endpoint that we could poll for changes, we could implement this like: + +```javascript +class ThirdPartyAPI extends Resource { + async *subscribe() { + do { + // get the next data change event from the source + let update = await (await fetch(`http://some-api.com/latest-update`)).json(); + const event = { // define the change event (which will update the cache) + type: 'put', // this would indicate that the event includes the new data value + id: update.id, // the primary key of the record that updated + value: update.value, // the new value of the record that updated + timestamp: update.timestamp, // the timestamp of when the data change occurred + }; + yield event; // this returns this event, notifying the cache of the change + } while(true); + } + async get() { +... +``` + +Notification events should always include an `id` to indicate the primary key of the updated record. The event should have a `value` for `put` and `message` event types. The `timestamp` is optional and can be used to indicate the exact timestamp of the change. The following event `type`s are supported: + +* `put` - This indicates that the record has been updated and provides the new value of the record +* `invalidate` - Alternately, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed). +* `delete` - This indicates that the record has been deleted. +* `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time). +* `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property. + +And the following properties can be defined on event objects: + +* `type`: The event type as described above.
+* `id`: The primary key of the record that updated. +* `value`: The new value of the record that updated (for put and message). +* `writes`: An array of event properties that are part of a transaction (used in conjunction with the transaction event type). +* `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables. +* `timestamp`: The timestamp of when the data change occurred. + +With an active external data source with a `subscribe` method, the data source will proactively notify the cache, ensuring a fresh and efficient active cache. Note that with an active data source, we still use the `sourcedFrom` method to register the source for a caching table, and the table will automatically detect and call the subscribe method on the data source. + +By default, HarperDB will only run the subscribe method on one thread. HarperDB is multi-threaded and normally runs many concurrent worker threads, but typically running a subscription on multiple threads can introduce overlap in notifications and race conditions, so running a subscription on a single thread is preferable. However, if you want to enable subscribe on multiple threads, you can define a `static subscribeOnThisThread` method to specify if the subscription should run on the current thread: + +```javascript +class ThirdPartyAPI extends Resource { + static subscribeOnThisThread(threadIndex) { + return threadIndex < 2; // run on two threads (the first two threads) + } + async *subscribe() { + .... +``` + +An alternative to using asynchronous generators is to use a subscription stream and send events to it. A default subscription stream (that doesn't generate its own events) is available from the Resource's default subscribe method: + +```javascript +class ThirdPartyAPI extends Resource { + subscribe() { + const subscription = super.subscribe(); + setupListeningToRemoteService().on('update', (event) => { + subscription.send(event); + }); + return subscription; + } +} +``` + +## Downstream Caching + +It is highly recommended that you utilize the [REST interface](../rest) for accessing caching tables, as it facilitates downstream caching for clients. Timestamps are recorded with all cached entries. These timestamps are then used for incoming [REST requests to specify the `ETag` in the response](../rest#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data based on the timestamp/`ETag` of the entries that are cached in HarperDB. Caching tables also have [subscription capabilities](./caching#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of HarperDB, as either passive or active caches. + +## Write-Through Caching + +The cache we have defined so far only has data flowing from the data source to the cache. However, you may wish to support write methods, so that writes to the cache table can flow through to the underlying canonical data source, as well as populate the cache. This can be accomplished by implementing the standard write methods, like `put` and `delete`.
If you were using an API with standard RESTful methods, you can pass writes through to the data source like this: + +```javascript +class ThirdPartyAPI extends Resource { + async put(data) { + await fetch(`http://some-api.com/${this.getId()}`, { + method: 'PUT', + body: JSON.stringify(data) + }); + } + async delete() { + await fetch(`http://some-api.com/${this.getId()}`, { + method: 'DELETE', + }); + } + ... +``` + +When doing an insert or update to the MyCache table, the data will be sent to the underlying data source through the `put` method and the new record value will be stored in the cache as well. + +### Loading from Source in Methods + +When you are using a caching table, it is important to remember that any resource methods besides `get()` will not automatically load data from the source. If you have defined a `put()`, `post()`, or `delete()` method and you need the source data, you can ensure it is loaded by calling the `ensureLoaded()` method. For example, if you want to modify the existing record from the source, adding a property to it: + +```javascript +class MyCache extends tables.MyCache { + async post(data) { + // if the data is not cached locally, retrieves from source: + await this.ensureLoaded(); + // now we can be sure that the data is loaded, and can access properties + this.quantity = this.quantity - data.purchases; + } +} +``` + +### Subscribing to Caching Tables + +You can subscribe to a caching table just like any other table. The one difference is that normal tables do not usually have `invalidate` events, but an active caching table may have `invalidate` events. Again, this event type gives listeners an opportunity to choose whether or not to actually retrieve the value that changed. + +### Caching with Replication + +Caching tables can be configured to replicate in HarperDB clusters. When replicating caching tables, there are a couple of options. If each node will be separately connected to the data source and you do not need the subscription data notification events to replicate, you can set the `replicationSource` to `false`. In this case, only data requests (that come through standard requests like the REST interface or operations API) will be replicated. However, if data notifications will only be delivered to a single node (at a time) and you need the subscription data notification events to replicate, you can set the `replicationSource` to `true`, and the incoming events from the subscription will be replicated to all other nodes: + +```javascript +MyTable.sourcedFrom(ThirdPartyAPI, { replicationSource: true }); +``` + +### Passive-Active Updates + +With our passive update examples, we have provided a data source handler with a `get()` method that returns the specific requested record as the response. However, we can also actively update other records in our response handler (if our data source provides data that should be propagated to other related records). This can be done transactionally, to ensure that all updates occur atomically. The context that is provided to the data source holds the transaction information, so we can simply pass the context to any update/write methods that we call.
For example, let's say we are loading a blog post, which should also include its comment records:
+
+```javascript
+const { Post, Comment } = tables;
+class BlogSource extends Resource {
+	async get() {
+		let post = await (await fetch(`http://my-blog-server/${this.getId()}`)).json();
+		for (let comment of post.comments) {
+			await Comment.put(comment, this); // save this comment as part of our current context and transaction
+		}
+		return post;
+	}
+}
+Post.sourcedFrom(BlogSource);
+```
+
+Here both the update to the post and the update to the comments will be atomically/transactionally committed together with the same timestamp.
+
+## Cache-Control header
+
+When interacting with cached data, you can also use the `Cache-Control` request header to specify certain caching behaviors. When performing a PUT (or POST) method, you can use the `max-age` directive to indicate how long the resource should be cached (until stale):
+
+```http
+PUT /my-resource/id
+Cache-Control: max-age=86400
+```
+
+You can use the `only-if-cached` directive on GET requests to only return a resource if it is cached (otherwise a 504 will be returned). Note that if the entry is not cached, this will still trigger a request to the data source for the source data. If you do not want source data retrieved, you can add the `no-store` directive. You can also use the `no-cache` directive if you do not want to use the cached resource. If you want to check whether there is a cached resource without triggering a request to the data source:
+
+```http
+GET /my-resource/id
+Cache-Control: only-if-cached, no-store
+```
+
+You may also use the `stale-if-error` directive to indicate that it is acceptable to return a stale cached resource when the data source returns an error (network connection error, 500, 502, 503, or 504). The `must-revalidate` directive indicates that a stale cached resource cannot be returned, even when the data source has an error (by default, a stale cached resource is returned when there is a network connection error).
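+
+For example, a request that prefers a stale cached entry over a failure might look like this (a sketch based on the directives described above):
+
+```http
+GET /my-resource/id
+Cache-Control: stale-if-error
+```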
diff --git a/site/versioned_docs/version-4.2/developers/applications/debugging.md b/site/versioned_docs/version-4.2/developers/applications/debugging.md
new file mode 100644
index 00000000..ca03115f
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/applications/debugging.md
@@ -0,0 +1,39 @@
+---
+title: Debugging Applications
+---
+
+# Debugging Applications
+
+HarperDB components and applications run inside the HarperDB process, which is a standard Node.js process that can be debugged with standard JavaScript development tools like Chrome's devtools, VSCode, and WebStorm. Debugging can be performed by launching the HarperDB entry script with your IDE, or you can start HarperDB in dev mode and connect your debugger to the running process (which defaults to the standard 9229 port):
+
+```
+harperdb dev
+# or to run and debug a specific app
+harperdb dev /path/to/app
+```
+
+Once you have connected a debugger, you may set breakpoints in your application and fully debug it. Note that using the `dev` command from the CLI runs HarperDB in single-threaded mode. This would not be appropriate for production use, but it makes it easier to debug applications.
+
+For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use HarperDB's logging facilities, so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by HarperDB. This logger can be used to output messages directly to the HarperDB log using the standardized logging level functions described below. The log level can be set in the [HarperDB Configuration File](../../deployments/configuration).
+
+HarperDB Logger Functions
+
+* `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
+* `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
+* `info(message)`: Write an 'info' level log, if the configured level allows for it.
+* `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
+* `error(message)`: Write an 'error' level log, if the configured level allows for it.
+* `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
+* `notify(message)`: Write a 'notify' level log.
+
+For example, you can log a warning:
+
+```javascript
+logger.warn('You have been warned');
+```
+
+If you want to ensure a message is logged, you can use `notify`, as these messages will appear in the log regardless of the configured log level.
+
+## Viewing the Log
+
+The HarperDB Log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the [Studio Status page](../../administration/harperdb-studio/instance-metrics). Additionally, you can use the [`read_log` operation](../operations-api/logs) to query the HarperDB log.
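+
+For example, a `read_log` request might look like the following; this is a sketch, and the exact parameter set is documented with the linked operation:
+
+```json
+{
+	"operation": "read_log",
+	"level": "error",
+	"limit": 100
+}
+```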
diff --git a/site/versioned_docs/version-4.2/developers/applications/define-routes.md b/site/versioned_docs/version-4.2/developers/applications/define-routes.md
new file mode 100644
index 00000000..222d14cf
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/applications/define-routes.md
@@ -0,0 +1,118 @@
+---
+title: Define Fastify Routes
+---
+
+# Define Fastify Routes
+
+HarperDB’s applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints. While we generally recommend building your endpoints/APIs with HarperDB's [REST interface](../rest) for better performance and standards compliance, Fastify's routes can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.
+
+The fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):
+
+```yaml
+fastifyRoutes: # This loads files that define fastify routes using fastify's auto-loader
+  files: routes/*.js # specify the location of route definition modules
+  path: . # relative to the app-name, like http://server/app-name/route-name
+```
+
+By default, route URLs are configured to be:
+
+* \[**Instance URL**]:\[**Custom Functions Port**]/\[**Project Name**]/\[**Route URL**]
+
+However, you can specify the path to be `/` if you wish to have your routes handling the root path of incoming URLs.
+
+* The route below, using the default config, within the **dogs** project, with a route of **breeds**, would be available at **http://localhost:9926/dogs/breeds**.
+
+In effect, this route is just a pass-through to HarperDB. The same result could have been achieved by hitting the core HarperDB API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/',
+		method: 'POST',
+		preValidation: hdbCore.preValidation,
+		handler: hdbCore.request,
+	})
+}
+```
+
+## Custom Handlers
+
+For endpoints where you want to execute multiple operations against HarperDB, or perform additional processing (like an ML classification, or an aggregation, or a call to a 3rd party API), you can define your own logic in the handler. The function below will execute a query against the dogs table and filter the results to only return those dogs over 4 years in age.
+
+**IMPORTANT: This route has NO preValidation and uses hdbCore.requestWithoutAuthentication, which, as the name implies, bypasses all user authentication. See the security concerns and mitigations in the “helper methods” section, below.**
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/:id',
+		method: 'GET',
+		handler: async (request) => {
+			request.body = {
+				operation: 'sql',
+				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`
+			};
+
+			const result = await hdbCore.requestWithoutAuthentication(request);
+			return result.filter((dog) => dog.age > 4);
+		}
+	});
+}
+```
+
+## Custom preValidation Hooks
+
+The simple example above was just a pass-through to HarperDB; the exact same result could have been achieved by hitting the core HarperDB API. But for many applications, you may want to authenticate the user using custom logic you write, or by conferring with a 3rd party service. Custom preValidation hooks let you do just that.
+
+Below is an example of a route that uses a custom validation hook:
+
+```javascript
+import customValidation from '../helpers/customValidation';
+
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/:id',
+		method: 'GET',
+		preValidation: (request) => customValidation(request, logger),
+		handler: (request) => {
+			request.body = {
+				operation: 'sql',
+				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`
+			};
+
+			return hdbCore.requestWithoutAuthentication(request);
+		}
+	});
+}
+```
+
+Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](#helper-methods).
+
+## Helper Methods
+
+When declaring routes, you are given access to 2 helper methods: hdbCore and logger.
+
+**hdbCore**
+
+hdbCore contains three functions that allow you to authenticate an inbound request and execute operations against HarperDB directly, bypassing the standard Operations API.
+
+* **preValidation**
+
+  This is an array of functions used for fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard HarperDB Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**.
+* **request**
+
+  This will execute a request with HarperDB using the operations API. The `request.body` should contain a standard HarperDB operation and must also include the `hdb_user` property that was added to `request.body` by the preValidation callback.
+* **requestWithoutAuthentication**
+
+  Executes a request against HarperDB without any security checks around whether the inbound user is allowed to make this request.
For security purposes, you should always take the following precautions when using this method:
+
+  * Properly handle user-submitted values, including URL params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.
+
+**logger**
+
+This helper allows you to write directly to the log file, hdb.log. It’s useful for debugging during development, although you may also use the console logger. There are 5 functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.
+
+* logger.trace(‘Starting the handler for /dogs’)
+* logger.debug(‘This should only fire once’)
+* logger.warn(‘This should never ever fire’)
+* logger.error(‘This did not go well’)
+* logger.fatal(‘This did not go very well at all’)
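+
+Putting these helpers together, a minimal route that only logs at debug level might look like this (a sketch; the `/health` path and response shape are illustrative and not part of the template):
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/health',
+		method: 'GET',
+		handler: async () => {
+			logger.debug('Health check requested');
+			return { status: 'ok' };
+		},
+	});
+}
+```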
diff --git a/site/versioned_docs/version-4.2/developers/applications/defining-schemas.md b/site/versioned_docs/version-4.2/developers/applications/defining-schemas.md
new file mode 100644
index 00000000..d1204f15
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/applications/defining-schemas.md
@@ -0,0 +1,103 @@
+---
+title: Defining Schemas
+---
+
+# Defining Schemas
+
+Schemas define tables and their attributes. Schemas can be declaratively defined in HarperDB using GraphQL schema definitions. Schema definitions can be used to ensure that tables required by applications exist and have the appropriate attributes. Schemas can define the primary key, data types for attributes, whether they are required, and which attributes should be indexed. The [introduction to applications](./) provides a helpful introduction to how to use schemas as part of database application development.
+
+Schemas can be used to define the expected structure of data, but they are also highly flexible, support heterogeneous data structures, and by default allow data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).
+
+An example schema that defines a couple of tables might look like:
+
+```graphql
+# schema.graphql:
+type Dog @table {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+}
+
+type Breed @table {
+	id: ID @primaryKey
+}
+```
+
+In this example, you can see that we specified the expected data structure for records in the Dog and Breed tables. For example, this will enforce that Dog records are required to have a `name` property with a string (or null, unless the type is specified to be non-nullable). This does not preclude records from having additional properties (see `@sealed` for preventing additional properties). For example, some Dog records could also optionally include a `favoriteTrick` property.
+
+On this page, we describe the specific directives that HarperDB uses for defining tables and attributes in a schema.
+
+### Type Directives
+
+#### `@table`
+
+The schema for a table is defined using a GraphQL type definition with a `@table` directive:
+
+```graphql
+type TableName @table
+```
+
+By default the table name is inherited from the type name (in this case the table name would be "TableName"). The `@table` directive supports several optional arguments, all of which can be freely combined:
+
+* `@table(table: "table_name")` - This allows you to explicitly specify the table name.
+* `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database.
+* `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds.
+* `@table(audit: true)` - This enables the audit log for the table so that a history of record changes is recorded. This defaults to the [configuration file's setting for `auditLog`](../../deployments/configuration#logging).
+
+#### `@export`
+
+This indicates that the specified table should be exported as a resource that is accessible as an externally available endpoint, through REST, MQTT, or any of the external resource APIs.
+
+This directive also accepts a `name` parameter to specify the name that should be used for the exported resource (how it will appear in the URL path). For example:
+
+```
+type MyTable @table @export(name: "my-table")
+```
+
+This table would be available at the URL path `/my-table/`. Without the `name` parameter, the exported name defaults to the name of the table type ("MyTable" in this example).
+
+#### `@sealed`
+
+The `@sealed` directive specifies that no additional properties should be allowed on records besides those specified in the type itself.
+
+### Field Directives
+
+Field directives provide information about each attribute in a table type definition.
+
+#### `@primaryKey`
+
+The `@primaryKey` directive specifies that an attribute is the primary key for a table. Primary keys must be unique, and when a record is created without a primary key, one will be auto-generated as a UUID.
+
+#### `@indexed`
+
+The `@indexed` directive specifies that an attribute should be indexed. This is necessary if you want to execute queries using this attribute (whether that is through RESTful query parameters, SQL, or NoSQL operations).
+
+#### `@createdTime`
+
+The `@createdTime` directive indicates that this property should be assigned a timestamp of the creation time of the record (in epoch milliseconds).
+
+#### `@updatedTime`
+
+The `@updatedTime` directive indicates that this property should be assigned the timestamp of each update to the record (in epoch milliseconds).
+
+### Defined vs Dynamic Schemas
+
+If you do not define a schema for a table and create a table through the operations API (without specifying attributes) or the studio, such a table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema). It is generally best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.
+
+### Field Types
+
+HarperDB supports the following field types in addition to user-defined (object) types:
+
+* String: String/text.
+* Int: A 32-bit signed integer (from -2147483648 to 2147483647).
+* Long: A 54-bit signed integer (from -9007199254740992 to 9007199254740992).
+* Float: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision\_floating-point\_format). Note that all numbers are stored in the most compact representation available).
+* Boolean: true or false.
+* ID: A string (but indicates that it is not intended to be human-readable).
+* Any: Any primitive, object, or array is allowed.
+* Date: A Date object.
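+
+As a quick illustration of how these types and field directives combine, here is a sketch of a table definition (the `Event` table and its attribute names are purely illustrative, and `Long` is assumed here as a reasonable type for epoch-millisecond timestamps):
+
+```graphql
+type Event @table @export {
+	id: ID @primaryKey
+	title: String @indexed
+	attendees: Int
+	createdAt: Long @createdTime
+	updatedAt: Long @updatedTime
+}
+```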
+
+#### Renaming Tables
+
+It is important to note that HarperDB does not currently support renaming tables. If you change the name of a table in your schema definition, this will result in the creation of a new, empty table.
diff --git a/site/versioned_docs/version-4.2/developers/applications/example-projects.md b/site/versioned_docs/version-4.2/developers/applications/example-projects.md
new file mode 100644
index 00000000..2eb92ba4
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/applications/example-projects.md
@@ -0,0 +1,37 @@
+---
+title: Example Projects
+---
+
+# Example Projects
+
+**Library of example HarperDB applications and components:**
+
+* [Authorization in HarperDB using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang
+
+* [How to Speed Up your Applications by Caching at the Edge with HarperDB](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams
+
+* [OAuth Authentication in HarperDB using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos
+
+* [How To Create a CRUD API with Next.js & HarperDB Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock
+
+* [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush
+
+* [How to use HarperDB Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden
+
+* [Using TensorFlowJS & HarperDB Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft
+
+* [Build & Deploy a Fitness App with Python & HarperDB](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber
+
+* [Create a Discord Slash Bot using HarperDB Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty
+
+* [How I used HarperDB Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj
+
+* [How I used HarperDB Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary
+
+* [How To Use HarperDB Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi
+
+* [Build a Web App Using HarperDB’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp
+
+* [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David
+
+* [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select* Podcast
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/developers/applications/index.md b/site/versioned_docs/version-4.2/developers/applications/index.md
new file mode 100644
index 00000000..bad0c09f
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/applications/index.md
@@ -0,0 +1,375 @@
+---
+title: Applications
+---
+
+# Applications
+
+## Overview of HarperDB Applications
+
+HarperDB is more than a database: it's a distributed clustering platform allowing you to package your schema, endpoints,
and application logic and deploy them to an entire fleet of HarperDB instances optimized for on-the-edge scalable data delivery.
+
+In this guide, we are going to explore the increasingly extensible architecture that HarperDB 4.2 and greater provides by building a HarperDB component, a fundamental building block of the HarperDB ecosystem.
+
+When working through this guide, we recommend you use the [HarperDB Application Template](https://github.com/HarperDB/application-template) repo as a reference.
+
+## Understanding the Component Application Architecture
+
+HarperDB provides several types of components. Any package that is added to HarperDB is called a "component", and components are generally categorized as either "applications", which deliver a set of endpoints for users, or "extensions", which are building blocks for features like authentication, additional protocols, and connectors that can be used by other components. Components can be added to the `hdb/components` directory and will be loaded by HarperDB when it starts. Components that are remotely deployed to HarperDB (through the studio or the operations API) are installed into the hdb/node\_modules directory. Using `harperdb run .` or `harperdb dev .` allows us to specifically load a certain application in addition to any that have been manually added to `hdb/components` or installed in `node_modules`.
+
+```mermaid
+flowchart LR
+    Client(Client)-->Endpoints
+    Client(Client)-->HTTP
+    Client(Client)-->Extensions
+    subgraph HarperDB
+        direction TB
+        Applications(Applications)-- "Schemas" --> Tables[(Tables)]
+        Applications-->Endpoints[/Custom Endpoints/]
+        Applications-->Extensions
+        Endpoints-->Tables
+        HTTP[/REST/HTTP/]-->Tables
+        Extensions[/Extensions/]-->Tables
+    end
+```
+
+## Getting up and Running
+
+### Pre-Requisites
+
+We assume you are running HarperDB version 4.2 or greater, which supports the HarperDB application architecture (in previous versions, this was 'custom functions').
+
+### Scaffolding our Application Directory
+
+Let's create and initialize a new directory for our application. It is recommended that you start by using the [HarperDB application template](https://github.com/HarperDB/application-template). Assuming you have `git` installed, you can create your project directory by cloning:
+
+```shell
+> git clone https://github.com/HarperDB/application-template my-app
+> cd my-app
+```
+
+You can also start with an empty application directory if you'd prefer.
+
+To create your own application from scratch, you may want to initialize it as an npm package with the `type` field set to `module` in the `package.json` so that you can use the ECMAScript module syntax used in this tutorial:
+
+```shell
+> mkdir my-app
+> cd my-app
+> npm init -y esnext
+```
+
+If you want to version control your application code, you can adjust the remote URL to your repository.
+
+Here's an example for a GitHub repo:
+
+```shell
+> git remote set-url origin git@github.com:your-account/your-repo.git
+```
+
+Locally developing your application and committing it to source control is a great way to manage your code and configuration, and you can then [directly deploy from your repository](#deploying-your-application).
+
+## Creating our first Table
+
+The core of a HarperDB application is the database, so let's create a database table!
+
+A quick and expressive way to define a table is through a [GraphQL Schema](https://graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we will need to add a type named `Dog` with a `@table` directive (and you can remove the example table in the template):
+
+```graphql
+type Dog @table {
+	# properties will go here soon
+}
+```
+
+And then we'll add a primary key named `id` of type `ID`:
+
+_(Note: A GraphQL schema is a fast method to define tables in HarperDB, but you are by no means required to use GraphQL to query your application, nor should you necessarily do so)_
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+}
+```
+
+Now we tell HarperDB to run this as an application:
+
+```shell
+> harperdb dev . # tell the HarperDB CLI to run the current directory as an application in dev mode
+```
+
+HarperDB will now create the `Dog` table and the `id` attribute we just defined. Not only is this an easy way to create a table, but this schema is included in our application, which will ensure that this table exists wherever we deploy this application (to any HarperDB instance).
+
+## Adding Attributes to our Table
+
+Next, let's expand our `Dog` table by adding additional typed attributes for dog `name`, `breed` and `age`.
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+}
+```
+
+This will ensure that new records must have these properties with these types.
+
+Because we ran `harperdb dev .` earlier (dev mode), HarperDB is now monitoring the contents of our application directory for changes and reloading when they occur. This means that once we save our schema file with these new attributes, HarperDB will automatically reload our application, read `my-app/schema.graphql`, and update the `Dog` table and attributes we just defined. The dev mode will also ensure that any logging or errors are immediately displayed in the console (rather than only in the log file).
+
+As a NoSQL database, HarperDB supports heterogeneous records (also referred to as documents), so you can freely specify additional properties on any record. If you do want to restrict the records to only defined properties, you can always do that by adding the `@sealed` directive:
+
+```graphql
+type Dog @table @sealed {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+	tricks: [String]
+}
+```
+
+If you are using HarperDB Studio, you can now [add JSON-formatted records](../../administration/harperdb-studio/manage-schemas-browse-data#add-a-record) to this new table in the studio or upload data as [CSV from a local file or URL](../../administration/harperdb-studio/manage-schemas-browse-data#load-csv-data). A third, more advanced, way to add data to your database is to use the [operations API](../operations-api/), which provides full administrative control over your new HarperDB instance and tables.
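+
+For example, an `insert` operation adding a record through the operations API might look like this (a sketch; the record values are illustrative, and the table is assumed to live in the default database):
+
+```json
+{
+	"operation": "insert",
+	"table": "Dog",
+	"records": [{ "name": "Harper", "breed": "Labrador", "age": 3 }]
+}
+```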
+
+## Adding an Endpoint
+
+Now that we have a running application with a database (with data if you imported any), let's make this data accessible from a RESTful URL by adding an endpoint. To do this, we simply add the `@export` directive to our `Dog` table:
+
+```graphql
+type Dog @table @export {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+	tricks: [String]
+}
+```
+
+By default the application HTTP server port is `9926` (this can be [configured here](../../deployments/configuration#http)), so the local URL would be [http://localhost:9926/Dog/](http://localhost:9926/Dog/) with a full REST API. We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, you can use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id:
+
+```http
+POST /Dog/
+Content-Type: application/json
+
+{
+	"name": "Harper",
+	"breed": "Labrador",
+	"age": 3,
+	"tricks": ["sits"]
+}
+```
+
+With this, a record will be created, and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/` to view that record. Alternatively, the curl command `curl http://localhost:9926/Dog/` will achieve the same thing.
+
+## Authenticating Endpoints
+
+These endpoints automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../security/) for more information on different levels of access.
+
+By default, HarperDB also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make it simple to interact for local development. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../../deployments/configuration#authentication).
+
+### Content Negotiation
+
+These endpoints support various content types, including `JSON`, `CBOR`, `MessagePack` and `CSV`. Simply include an `Accept` header in your requests with the preferred content type. We recommend `CBOR` as a compact, efficient encoding with rich data types, but `JSON` is familiar and great for web application development, and `CSV` can be useful for exporting data to spreadsheets or other processing.
+
+HarperDB works with other important standard HTTP headers as well, and these endpoints are even capable of caching interaction:
+
+```
+Authorization: Basic
+Accept: application/cbor
+If-None-Match: "etag-id" # browsers can automatically provide this
+```
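+
+For instance, requesting CBOR-encoded results with curl might look like this (a sketch; `some-id` is a placeholder for a record's primary key):
+
+```shell
+curl -H "Accept: application/cbor" http://localhost:9926/Dog/some-id
+```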
+
+## Querying
+
+Querying your application database is straightforward and easy, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../rest). Simple queries can be crafted through [URL query parameters](https://en.wikipedia.org/wiki/Query\_string).
+
+In order to maintain reasonable query speed on a database as it grows in size, it is critical to select and establish the proper indexes. So, before we add the `@export` declaration to our `Dog` table and begin querying it, let's take a moment to target some table properties for indexing. We'll use `name` and `breed` as indexed table properties on our `Dog` table. All we need to do to accomplish this is tag these properties with the `@indexed` directive:
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+	name: String @indexed
+	breed: String @indexed
+	owner: String
+	age: Int
+	tricks: [String]
+}
+```
+
+And finally, we'll add the `@export` directive to expose the table as a RESTful endpoint:
+
+```graphql
+type Dog @table @export {
+	id: ID @primaryKey
+	name: String @indexed
+	breed: String @indexed
+	owner: String
+	age: Int
+	tricks: [String]
+}
+```
+
+Now we can start querying. Again, we simply access the endpoint with query parameters (basic GET requests), like:
+
+```
+http://localhost:9926/Dog/?name=Harper
+http://localhost:9926/Dog/?breed=Labrador
+http://localhost:9926/Dog/?breed=Husky&name=Balto&select=id,name,breed
+```
+
+Congratulations, you have now created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../rest) and see the [Schema reference](./defining-schemas) for more options for defining schemas.
+
+## Deploying your Application
+
+This guide assumes that you're building a HarperDB application locally. If you have a cloud instance available, you can deploy it by doing the following:
+
+* Commit and push your application component directory code (i.e., the `my-app` directory) to a GitHub repo. In this tutorial we started with a clone of the application-template. To commit and push to your own repository, change the origin to your repo: `git remote set-url origin git@github.com:your-account/your-repo.git`
+* Go to the applications section of your target cloud instance in the [HarperDB Studio](https://studio.harperdb.io)
+* In the left-hand menu of the applications IDE, click 'deploy' and specify a package location reference that follows the [npm package specification](https://docs.npmjs.com/cli/v8/using-npm/package-spec) (i.e., a string like `HarperDB/Application-Template` or a URL like `https://github.com/HarperDB/application-template`, for example, that npm knows how to install).
+
+You can also deploy your application from your repository directly using the [`deploy_component` operation](../operations-api/components#deploy-component).
+
+Once you have deployed your application to a HarperDB cloud instance, you can start scaling your application by adding additional instances in other regions.
+
+With a global traffic manager/load balancer configured, you can distribute incoming requests to the appropriate server. You can deploy and re-deploy your application to all the nodes in your mesh.
+
+Now, with an application that you can deploy, update, and re-deploy, you have an application that is horizontally and globally scalable!
+
+## Custom Functionality with JavaScript
+
+So far we have built an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. This gives you tremendous flexibility and control over how data is accessed and modified in HarperDB. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality, by adding a property to the dog records, when they are returned, that includes their age in human years.
In HarperDB, data is accessed through our [Resource API](../../technical-details/reference/resource), a standard interface for accessing data sources and tables and making them available to endpoints. Database tables are `Resource` classes, and so extending the functionality of a table is as simple as extending their class.
+
+To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder). Endpoints can then be defined with Resource classes that are `export`ed. This can be done in addition to, or in lieu of, the `@export`ed types in the schema.graphql. If you are exporting and extending a table you defined in the schema, make sure you remove the `@export` from the schema so that you don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put`, to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it:
+
+```javascript
+// resources.js:
+const { Dog } = tables; // get the Dog table from the HarperDB provided set of tables (in the default database)
+
+export class DogWithHumanAge extends Dog {
+	get(query) {
+		this.humanAge = 15 + this.age * 5; // silly calculation of human age equivalent
+		return super.get(query);
+	}
+}
+```
+
+Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, we now have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute, so accessing instance properties like `age` will get the value from the underlying record, and changed or newly assigned properties can be saved or included in the resource as it is returned and serialized. The `return super.get(query)` call at the end allows for any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest#select-properties)).
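+
+Trying out the new endpoint works the same way as the table endpoint (a sketch; the id and the response values are illustrative):
+
+```shell
+curl http://localhost:9926/DogWithHumanAge/some-dog-id
+# => { "id": "some-dog-id", "name": "Harper", "age": 3, "humanAge": 30 }
+```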
+
+Often we may want to incorporate data from other tables or data sources into our data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql):
+
+```graphql
+type Breed @table {
+	name: String @primaryKey
+	description: String @indexed
+	lifespan: Int
+	averageWeight: Float
+}
+```
+
+Next we will use this table in our `get()` method. We will call the new table's (static) `get()` method to retrieve a breed by id. To do this correctly, we access the table using our current context by passing in `this` as the second argument. This is important because it ensures that we are accessing the data atomically, in a consistent snapshot across tables. This provides automatic tracking of most-recently-updated timestamps across resources for caching purposes, allows for sharing of contextual metadata (like the user who requested the data), and ensures transactional atomicity for any writes (not needed in this get operation, but important for other operations). The resource methods are automatically wrapped with a transaction (which will commit/finish when the method completes), and this allows us to fully utilize multiple resources in our current transaction. With our own snapshot of the database for the Dog and Breed tables, we can then access data like this:
+
+```javascript
+// resources.js:
+const { Dog, Breed } = tables; // get the Breed table too
+export class DogWithBreed extends Dog {
+	async get(query) {
+		let breedDescription = await Breed.get(this.breed, this);
+		this.breedDescription = breedDescription;
+		return super.get(query);
+	}
+}
+```
+
+The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified by the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance.
+
+Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While the HTTP PUT method has a specific semantic definition (replace the current record), the POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's `post()` method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array of a specific instance. We might do it like this, specifying an action to be able to differentiate actions:
+
+```javascript
+export class CustomDog extends Dog {
+	async post(data) {
+		if (data.action === 'add-trick')
+			this.tricks.push(data.trick);
+	}
+}
+```
+
+A POST request to /CustomDog/ would then call this `post` method. The Resource class automatically tracks changes you make to your resource instances and saves those changes when the transaction is committed (again, these methods are automatically wrapped in a transaction and committed once the request handler is finished). So when you push data onto the `tricks` array, this will be recorded and persisted when this method finishes, before sending a response to the client.
+
+The `post` method automatically marks the current instance as being updated. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like the `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction).
+
+We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post` method or `put` method to do this, but we may want to separate the logic so these methods can be called separately without authorization checks. The [Resource API](../../technical-details/reference/resource) defines `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods to easily configure individual capabilities. For example, we might do this:
+
+```javascript
+export class CustomDog extends Dog {
+	allowUpdate(user) {
+		return this.owner === user.username;
+	}
+}
+```
+
+Any methods that are not defined will fall back to HarperDB's default authorization procedure based on users' roles. If you are using/extending a table, this is based on HarperDB's [role-based access](../security/users-and-roles). If you are extending the base `Resource` class, the default access requires super user permission.
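+
+The other capability methods follow the same pattern, so individual capabilities can be combined in one class (a sketch; the owner-only delete rule is purely illustrative):
+
+```javascript
+export class CustomDog extends Dog {
+	allowUpdate(user) {
+		return this.owner === user.username;
+	}
+	allowDelete(user) {
+		// illustrative rule: only the owner may delete a dog record
+		return this.owner === user.username;
+	}
+}
+```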
+
+You can also use the `default` export to define the root path resource handler. For example:
+
+```javascript
+// resources.js
+export default class CustomDog extends Dog {
+	...
+```
+
+This will allow requests to a URL like `/` to be directly resolved to this resource.
+
+## Define Custom Data Sources
+
+We can also directly implement the Resource class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like:
+
+```javascript
+const { Breed } = tables; // our Breed table
+class BreedSource extends Resource { // define a data source
+	async get() {
+		return (await fetch(`http://best-dog-site.com/${this.getId()}`)).json();
+	}
+}
+// define that our breed table is a cache of data from the data source above, with a specified expiration
+Breed.sourcedFrom(BreedSource, { expiration: 3600 });
+```
+
+The [caching documentation](./caching) provides much more information on how to use HarperDB's powerful caching capabilities and set up data sources.
+
+HarperDB provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals) and the [Resource interface](../../technical-details/reference/resource).
+
+## Configuring Applications/Components
+
+Every application or component can define its own configuration in a `config.yaml`. If you are using the application template, you will have a [default configuration in this config file](https://github.com/HarperDB/application-template/blob/main/config.yaml) (which is also the default configuration if no config file is provided). Within the config file, you can configure how different files and resources are loaded and handled. The default configuration file itself is documented with directions. Each entry can specify any `files` that the loader will handle, and can also optionally specify what, if any, URL `path`s it will handle. A path of `/` means that the root URLs are handled by the loader, and a path of `.` indicates that URLs that start with this application's name are handled.
+
+This config file also allows you to define a location for static files (which are delivered as-is for incoming HTTP requests).
+
+Each configuration entry can have the following properties, in addition to properties that may be specific to the individual component:
+
+* `files`: This specifies the set of files that should be handled by the component. This is a glob pattern, so a set of files can be specified like "directory/**".
+* `path`: This is the URL path that is handled by this component.
+* `root`: This specifies the root directory for mapping file paths to the URLs. For example, if you want all the files in `web/**` to be available in the root URL path via the static handler, you could specify a root of `web`, to indicate that the web directory maps to the root URL path.
+* `package`: This is used to specify that this component is a third party package, and can be loaded from the specified package reference (which can be an NPM package, GitHub reference, URL, etc.).
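+
+To make these properties concrete, here is a sketch of a static-file entry (the `staticFiles` entry name is hypothetical; the actual entry names depend on the loaders your application uses, as shown in the template's config.yaml):
+
+```yaml
+staticFiles: # hypothetical entry name for a static file handler
+  files: web/** # handle all files under the web directory
+  root: web # strip the web/ prefix when mapping files to URL paths
+  path: / # serve these files from the root URL path
+```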
+
+## Define Fastify Routes
+
+Exporting resources will generate full RESTful endpoints, but you may prefer to define endpoints through a framework. HarperDB includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins that provides sophisticated route definition capabilities.
+
+By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which will allow these modules to export a function to define fastify routes. See the [defining routes documentation](./define-routes) for more information on how to create Fastify routes.
+
+However, Fastify is not as fast as HarperDB's RESTful endpoints (about 10%-20% slower/more overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so generally HarperDB's REST interface is recommended for optimum performance and ease of use.
+
+## Restarting Your Instance
+
+Generally, HarperDB will auto-detect when files change and auto-restart the appropriate threads. However, if there are changes that aren't detected, you may manually restart with the `restart_service` operation:
+
+```json
+{
+	"operation": "restart_service",
+	"service": "http_workers"
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/clustering/certificate-management.md b/site/versioned_docs/version-4.2/developers/clustering/certificate-management.md
new file mode 100644
index 00000000..58243cb7
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/clustering/certificate-management.md
@@ -0,0 +1,70 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+## Development
+
+Out of the box, HarperDB generates certificates that are used when HarperDB nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the HarperDB node, the following settings (see the full [configuration file](../../deployments/configuration) docs for more details) are defaulted & recommended for ease of development:
+
+```
+clustering:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+    insecure: true
+    verify: true
+```
+
+The certificates that HarperDB generates are stored in your `/keys/`.
+
+`insecure` is set to `true` to accept the certificate CN mismatch due to development certificates.
+
+`verify` is set to `true` to enable mutual TLS between the nodes.
+
+## Production
+
+In a production environment, we recommend using your own certificate authority (CA), or a public CA such as LetsEncrypt, to generate certs for your HarperDB cluster. This will let you generate certificates with CNs that match the FQDNs of your nodes.
+
+Once you generate new certificates, to make HarperDB start using them you can either replace the generated files with your own, or update the configuration to point to your new certificates, and then restart HarperDB.
+
+Since these new certificates can be issued with correct CNs, you should set `insecure` to `false` so that nodes will do full validation of the certificates of the other nodes.
+
+### Certificate Requirements
+
+* Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication`, as these certificates will be used both to accept connections from other HarperDB nodes and to make requests to other HarperDB nodes.
Example:
+
+```
+X509v3 Key Usage: critical
+    Digital Signature, Key Encipherment
+X509v3 Extended Key Usage:
+    TLS Web Server Authentication, TLS Web Client Authentication
+```
+
+* If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file.
+* If your certificates expire, you will need a way to issue new certificates to the nodes and then restart HarperDB. If you are using a public CA such as LetsEncrypt, a tool like `certbot` can be used to renew certificates.
+
+### Certificate Troubleshooting
+
+If you are having TLS issues with clustering, use the following steps to verify that your certificates are valid.
+
+1. Make sure certificates can be parsed and that you can view the contents:
+
+```
+openssl x509 -in .pem -noout -text
+```
+
+2. Make sure the certificate validates with the CA:
+
+```
+openssl verify -CAfile .pem .pem
+```
+
+3. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match:
+
+```
+openssl rsa -modulus -noout -in .pem | openssl md5
+openssl x509 -modulus -noout -in .pem | openssl md5
+```
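+
+If the checks above pass but connections still fail, it may also be worth confirming that the certificate has not expired by inspecting its validity window (a sketch; substitute your certificate file as in the steps above):
+
+```
+openssl x509 -in .pem -noout -dates
+```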
diff --git a/site/versioned_docs/version-4.2/developers/clustering/creating-a-cluster-user.md b/site/versioned_docs/version-4.2/developers/clustering/creating-a-cluster-user.md
new file mode 100644
index 00000000..3edecd29
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/clustering/creating-a-cluster-user.md
@@ -0,0 +1,59 @@
+---
+title: Creating a Cluster User
+---
+
+# Creating a Cluster User
+
+Inter-node authentication takes place via HarperDB users. There is a special role type called `cluster_user` that exists by default and limits the user to only clustering functionality.
+
+A `cluster_user` must be created and added to the `harperdb-config.yaml` file for clustering to be enabled.
+
+All nodes that are intended to be clustered together need to share the same `cluster_user` credentials (i.e. username and password).
+
+There are multiple ways a `cluster_user` can be created:
+
+1. Through the operations API by calling `add_user`
+
+```json
+{
+	"operation": "add_user",
+	"role": "cluster_user",
+	"username": "cluster_account",
+	"password": "letsCluster123!",
+	"active": true
+}
+```
+
+When using the API to create a cluster user, the `harperdb-config.yaml` file must be updated with the username of the new cluster user.
+
+This can be done through the API by calling `set_configuration` or by editing the `harperdb-config.yaml` file.
+
+```json
+{
+	"operation": "set_configuration",
+	"clustering_user": "cluster_account"
+}
+```
+
+In the `harperdb-config.yaml` file, under the top-level `clustering` element, there will be a `user` element. Set this to the name of the cluster user.
+
+```yaml
+clustering:
+  user: cluster_account
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._
+
+2. Upon installation using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+_Note: Using command line or environment variables for setting the cluster user only works on install._
+
+```
+harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
+
+3. Upon installation using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+```
+CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.2/developers/clustering/enabling-clustering.md b/site/versioned_docs/version-4.2/developers/clustering/enabling-clustering.md
new file mode 100644
index 00000000..6b563b19
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/clustering/enabling-clustering.md
@@ -0,0 +1,49 @@
+---
+title: Enabling Clustering
+---
+
+# Enabling Clustering
+
+Clustering does not run by default; it needs to be enabled.
+
+To enable clustering, the `clustering.enabled` configuration element in the `harperdb-config.yaml` file must be set to `true`.
+
+There are multiple ways to update this element:
+
+1. Directly editing the `harperdb-config.yaml` file and setting `enabled` to `true`
+
+```yaml
+clustering:
+  enabled: true
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._
+
+2. Calling `set_configuration` through the operations API
+
+```json
+{
+	"operation": "set_configuration",
+	"clustering_enabled": true
+}
+```
+
+_Note: When making any changes to HarperDB configuration, HarperDB must be restarted for the changes to take effect._
+
+3. Using **command line variables**.
+
+```
+harperdb --CLUSTERING_ENABLED true
+```
+
+4. Using **environment variables**.
+
+```
+CLUSTERING_ENABLED=true
+```
+
+An efficient way to **install HarperDB**, **create the cluster user**, **set the node name** and **enable clustering** in one operation is to combine the steps using command line and/or environment variables. Here is an example using command line variables.
+
+```
+harperdb install --CLUSTERING_ENABLED true --CLUSTERING_NODENAME Node1 --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.2/developers/clustering/establishing-routes.md b/site/versioned_docs/version-4.2/developers/clustering/establishing-routes.md
new file mode 100644
index 00000000..915c2844
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/clustering/establishing-routes.md
@@ -0,0 +1,73 @@
+---
+title: Establishing Routes
+---
+
+# Establishing Routes
+
+A route is a connection between two nodes. It is how the clustering network is established.
+
+Routes do not need to cross-connect all nodes in the cluster. You can select a leader node, or a few leaders, and have all other nodes connect to them; you can chain nodes; and so on. As long as there is one route connecting a node to the cluster, all other nodes should be able to reach that node.
+
+Using routes, the clustering servers will create a mesh network between nodes. This mesh network ensures that if a node drops out, all other nodes can still communicate with each other. That said, we recommend designing your routing with failover in mind; this means not storing all your routes on one node but dispersing them throughout the network.
+
+A simple route example is a two-node topology: if Node1 adds a route to connect it to Node2, Node2 does not need to add a route to Node1. That one route configuration is all that’s needed to establish a bidirectional connection between the nodes.
+
+A route consists of a `port` and a `host`.
+
+`port` - the clustering port of the remote instance you are creating the connection with. This is going to be the `clustering.hubServer.cluster.network.port` in the HarperDB configuration on the node you are connecting with.
+
+`host` - the host of the remote instance you are creating the connection with. This can be an IP address or a URL.
+
+Routes are set in the `harperdb-config.yaml` file using the `clustering.hubServer.cluster.network.routes` element, which expects an array of objects, where each object has two properties: `port` and `host`.
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      network:
+        routes:
+          - host: 3.62.184.22
+            port: 9932
+          - host: 3.735.184.8
+            port: 9932
+```
+
+![figure 1](/img/v4.2/clustering/figure1.png)
+
+This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes.
+
+There are multiple ways to set routes:
+
+1. Directly editing the `harperdb-config.yaml` file (refer to the code snippet above).
+2. Calling `cluster_set_routes` through the API.
+
+```json
+{
+	"operation": "cluster_set_routes",
+	"server": "hub",
+	"routes":[ {"host": "3.735.184.8", "port": 9932} ]
+}
+```
+
+_Note: When making any changes to HarperDB configuration, HarperDB must be restarted for the changes to take effect._
+
+3. From the command line.
+
+```bash
+--CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]"
+```
+
+4. Using environment variables.
+
+```bash
+CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}]
+```
+
+The API also has `cluster_get_routes` for getting all routes in the config and `cluster_delete_routes` for deleting routes.
+
+```json
+{
+	"operation": "cluster_delete_routes",
+	"routes":[ {"host": "3.735.184.8", "port": 9932} ]
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/clustering/index.md b/site/versioned_docs/version-4.2/developers/clustering/index.md
new file mode 100644
index 00000000..f5949afd
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/clustering/index.md
@@ -0,0 +1,31 @@
+---
+title: Clustering
+---
+
+# Clustering
+
+HarperDB clustering is the process of connecting multiple HarperDB databases together to create a database mesh network that enables users to define data replication patterns.
+
+HarperDB’s clustering engine replicates data between instances of HarperDB using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model based on the transaction time recorded on the transaction and the timestamp on the record on the receiving node.
+
+***
+
+### Common Use Case
+
+A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold:
+
+* The edge application should not be making outbound HTTP requests for security purposes.
+* There may not be a reliable network connection.
+* Not all sensor data will be sent to the cloud, whether because of the unreliable network connection or because storing all of it in the cloud is impractical.
+* The edge node should be inaccessible from outside the firewall.
+* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings.
diff --git a/site/versioned_docs/version-4.2/developers/clustering/index.md b/site/versioned_docs/version-4.2/developers/clustering/index.md new file mode 100644 index 00000000..f5949afd --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/clustering/index.md @@ -0,0 +1,31 @@ +--- +title: Clustering +--- + +# Clustering + +HarperDB clustering is the process of connecting multiple HarperDB databases together to create a database mesh network that enables users to define data replication patterns. + +HarperDB’s clustering engine replicates data between instances of HarperDB using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster, following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model, based on the transaction time recorded on the transaction and the timestamp on the record on the node. + +*** + +### Common Use Case + +A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold: + +* The edge application should not be making outbound HTTP requests for security purposes. +* There may not be a reliable network connection. +* Not all sensor data will be sent to the cloud, whether because of the unreliable network connection or because storing all of it in the cloud isn’t worth the trouble. +* The edge node should be inaccessible from outside the firewall. +* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings. + +HarperDB simplifies the architecture of such an application with its bi-directional, table-level replication: + +* The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds. +* The application continually pushes sensor data into a “sensor\_data” table via the localhost API, comparing it to the threshold values as it does so. +* When a threshold violation occurs, the application adds a record to the “alerts” table. +* The application appends to that record the “sensor\_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation. +* The edge instance publishes the “alerts” table up to the cloud instance. + +By letting HarperDB focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to HarperDB, you reduce the overall exposure of your application to outside forces.
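+ +In subscription terms, the edge instance in this example might configure its connection to the cloud instance along these lines. This is only a sketch using the `add_node` operation covered under Managing subscriptions; the node, schema, and table names are illustrative: + +```json +{ + "operation": "add_node", + "node_name": "CloudNode", + "subscriptions": [ + { + "schema": "dev", + "table": "thresholds", + "publish": false, + "subscribe": true + }, + { + "schema": "dev", + "table": "alerts", + "publish": true, + "subscribe": false + } + ] +} +```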
diff --git a/site/versioned_docs/version-4.2/developers/clustering/managing-subscriptions.md b/site/versioned_docs/version-4.2/developers/clustering/managing-subscriptions.md new file mode 100644 index 00000000..a1f8c56e --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/clustering/managing-subscriptions.md @@ -0,0 +1,168 @@ +--- +title: Managing subscriptions +--- + +# Managing subscriptions + +Subscriptions can be added, updated, or removed through the API. + +_Note: The schema and tables in the subscription must exist on either the local or the remote node. Any schema and tables that do not exist on one of the nodes (for example, the local node) will be automatically created on that node._ + +To add a single node and create one or more subscriptions, use `add_node`. + +```json +{ + "operation": "add_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": false, + "subscribe": true + }, + { + "schema": "dev", + "table": "chicken", + "publish": true, + "subscribe": true + } + ] +} +``` + +This is an example of adding Node2 to your local node. Subscriptions are created for two tables, dog and chicken. + +To update one or more subscriptions with a single node, use `update_node`. + +```json +{ + "operation": "update_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ] +} +``` + +This call will update the subscription with the dog table. Any other subscriptions with Node2 will not change. + +To add or update subscriptions with one or more nodes in one API call, use `configure_cluster`. + +```json +{ + "operation": "configure_cluster", + "connections": [ + { + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "chicken", + "publish": false, + "subscribe": true + }, + { + "schema": "prod", + "table": "dog", + "publish": true, + "subscribe": true + } + ] + }, + { + "node_name": "Node3", + "subscriptions": [ + { + "schema": "dev", + "table": "chicken", + "publish": true, + "subscribe": false + } + ] + } + ] +} +``` + +_Note: `configure_cluster` will override **any and all** existing subscriptions defined on the local node. This means that before going through the connections in the request and adding the subscriptions, it will first go through **all existing subscriptions the local node has** and remove them. To get all existing subscriptions, use `cluster_status`._ + +#### Start time + +There is an optional property called `start_time` that can be passed in the subscription. This property accepts an ISO-formatted UTC date. + +`start_time` can be used to set the time from which you would like to source transactions from a table when creating or updating a subscription. + +```json +{ + "operation": "add_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": false, + "subscribe": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +This example will get all transactions on Node2’s dog table starting from `2022-09-02T20:06:35.993Z` and replicate them locally on the dog table. + +If no start time is passed, it defaults to the current time. + +_Note: start time utilizes clustering to source past transactions. For this reason, it can only source transactions that occurred while clustering was enabled._ + +#### Remove node + +To remove a node and all its subscriptions, use `remove_node`. + +```json +{ + "operation":"remove_node", + "node_name":"Node2" +} +``` + +#### Cluster status + +To get the status of all connected nodes and see their subscriptions, use `cluster_status`. + +```json +{ + "node_name": "Node1", + "is_enabled": true, + "connections": [ + { + "node_name": "Node2", + "status": "open", + "ports": { + "clustering": 9932, + "operations_api": 9925 + }, + "latency_ms": 65, + "uptime": "11m 19s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ], + "system_info": { + "hdb_version": "4.0.0", + "node_version": "16.17.1", + "platform": "linux" + } + } + ] +} +``` diff --git a/site/versioned_docs/version-4.2/developers/clustering/naming-a-node.md b/site/versioned_docs/version-4.2/developers/clustering/naming-a-node.md new file mode 100644 index 00000000..d1ebdfb1 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/clustering/naming-a-node.md @@ -0,0 +1,45 @@ +--- +title: Naming a Node +--- + +# Naming a Node + +Node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster. + +The name cannot contain a dot (`.`), comma (`,`), asterisk (`*`), greater-than sign (`>`), or any whitespace. + +The name is set in the `harperdb-config.yaml` file using the `clustering.nodeName` configuration element. + +_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._ + +There are multiple ways to update this element: + +1. Directly editing the `harperdb-config.yaml` file. + +```yaml +clustering: + nodeName: Node1 +``` + +_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._ + +1. Calling `set_configuration` through the operations API + +```json +{ + "operation": "set_configuration", + "clustering_nodeName":"Node1" +} +``` + +1. Using command line variables. + +``` +harperdb --CLUSTERING_NODENAME Node1 +``` + +1. Using environment variables.
+ +``` +CLUSTERING_NODENAME=Node1 +``` diff --git a/site/versioned_docs/version-4.2/developers/clustering/requirements-and-definitions.md b/site/versioned_docs/version-4.2/developers/clustering/requirements-and-definitions.md new file mode 100644 index 00000000..1e2dd6af --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/clustering/requirements-and-definitions.md @@ -0,0 +1,11 @@ +--- +title: Requirements and Definitions +--- + +# Requirements and Definitions + +To create a cluster, you must have two or more nodes\* (aka instances) of HarperDB running. + +\*_A node is a single instance/installation of HarperDB. A node of HarperDB can operate independently with clustering on or off._ + +On the following pages we'll walk you through the steps required, in order, to set up a HarperDB cluster. diff --git a/site/versioned_docs/version-4.2/developers/clustering/subscription-overview.md b/site/versioned_docs/version-4.2/developers/clustering/subscription-overview.md new file mode 100644 index 00000000..63246c4f --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/clustering/subscription-overview.md @@ -0,0 +1,45 @@ +--- +title: Subscription Overview +--- + +# Subscription Overview + +A subscription defines how data should move between two nodes. Subscriptions are exclusively table level and operate independently. They connect a table on one node to a table on another node; the subscription will apply to a matching schema name and table name on both nodes. + +_Note: these docs often refer to ‘local’ and ‘remote’ nodes. In this context, ‘local’ is the node that receives the API request to create or update a subscription, and ‘remote’ is the other node referred to in the request, the one on the other end of the subscription._ + +A subscription consists of: + +`schema` - the name of the schema that the table you are creating the subscription for belongs to. + +`table` - the name of the table the subscription will apply to. + +`publish` - a boolean which determines if transactions on the local table should be replicated on the remote table. + +`subscribe` - a boolean which determines if transactions on the remote table should be replicated on the local table. + +#### Publish subscription + +![figure 2](/img/v4.2/clustering/figure2.png) + +This diagram is an example of a `publish` subscription from the perspective of Node1. + +The record with id 2 has been inserted in the dog table on Node1; after that insert completes, it is sent to Node2 and inserted in the dog table there. + +#### Subscribe subscription + +![figure 3](/img/v4.2/clustering/figure3.png) + +This diagram is an example of a `subscribe` subscription from the perspective of Node1. + +The record with id 3 has been inserted in the dog table on Node2; after that insert completes, it is sent to Node1 and inserted there. + +#### Subscribe and Publish + +![figure 4](/img/v4.2/clustering/figure4.png) + +This diagram shows both subscribe and publish, but publish is set to false. You can see that because subscribe is true, the insert on Node2 is being replicated on Node1, but because publish is set to false, the insert on Node1 is _**not**_ being replicated on Node2. + +![figure 5](/img/v4.2/clustering/figure5.png) + +This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1.
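+ +Expressed as an API call, that subscribe-and-publish scenario corresponds to a subscription like the one below; this is a sketch using the `add_node` operation covered under Managing subscriptions: + +```json +{ + "operation": "add_node", + "node_name": "Node2", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ] +} +```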
diff --git a/site/versioned_docs/version-4.2/developers/clustering/things-worth-knowing.md b/site/versioned_docs/version-4.2/developers/clustering/things-worth-knowing.md new file mode 100644 index 00000000..a140e4d3 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/clustering/things-worth-knowing.md @@ -0,0 +1,43 @@ +--- +title: Things Worth Knowing +--- + +# Things Worth Knowing + +Additional information that will help you define your clustering topology. + +*** + +### Transactions + +Transactions that are replicated across the cluster are: + +* Insert +* Update +* Upsert +* Delete +* Bulk loads + * CSV data load + * CSV file load + * CSV URL load + * Import from S3 + +When adding or updating a node, any schemas and tables in the subscription that don’t exist on the remote node will be automatically created. + +**Destructive schema operations do not replicate across a cluster**. Those operations include `drop_schema`, `drop_table`, and `drop_attribute`. If the desired outcome is to drop schema information from any nodes, then the operation(s) will need to be run on each node independently. + +Users and roles are not replicated across the cluster. + +*** + +### Queueing + +HarperDB has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catchup routine is executed to ensure data that was missed, specific to the subscription, is sent/received as defined. + +*** + +### Topologies + +HarperDB clustering creates a mesh network between nodes, giving end users the ability to create an infinite number of topologies. Subscription topologies can be as simple or as complex as needed. + +![](/img/v4.2/clustering/figure6.png) diff --git a/site/versioned_docs/version-4.2/developers/components/drivers.md b/site/versioned_docs/version-4.2/developers/components/drivers.md new file mode 100644 index 00000000..0f1c063e --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/drivers.md @@ -0,0 +1,12 @@ +--- +title: Drivers +description: >- + Industry standard tools for connecting real-time HarperDB data with BI, analytics, + reporting and data visualization technologies. +--- + +# Drivers + +
+| Driver | Docs | Download |
+| -------- | ------------ | -------------------------------------- |
+| Power BI | PowerBI Docs | Windows |
+| Tableau | Tableau Docs | Windows, Mac, Driver JAR |
+| Excel | Excel Docs | Windows |
+| JDBC | JDBC Docs | Windows, Mac, Driver JAR |
+| ODBC | ODBC Docs | Windows, Mac, Linux (RPM), Linux (DEB) |
+| ADO | ADO Docs | Windows |
+| Cmdlets | Cmdlets Docs | Windows |
+| SSIS | SSIS Docs | Windows |
diff --git a/site/versioned_docs/version-4.2/developers/components/google-data-studio.md b/site/versioned_docs/version-4.2/developers/components/google-data-studio.md new file mode 100644 index 00000000..e33fb2bd --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/google-data-studio.md @@ -0,0 +1,37 @@ +--- +title: Google Data Studio +--- + +# Google Data Studio + +[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The HarperDB Google Data Studio connector seamlessly integrates your HarperDB data with Google Data Studio so you can build custom, real-time data visualizations. + +The HarperDB Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/). + +## Requirements + +The HarperDB database must be accessible through the Internet in order for Google Data Studio servers to access it. The database may be hosted by you or via [HarperDB Cloud](../../deployments/harperdb-cloud/). + +## Get Started + +Get started by selecting the HarperDB connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create). + +1. Log in to https://datastudio.google.com/. +1. Add a new Data Source using the HarperDB connector. The current release version can be added as a data source by following this link: [HarperDB Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60\_VJeO3BhHJPvdd20-S76g). +1. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database). +1. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it. +1. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer. +1. Check the box for “Allow Bad Certs” if your HarperDB instance does not have a valid SSL certificate. [HarperDB Cloud](../../deployments/harperdb-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using [HarperDB Cloud](../../deployments/harperdb-cloud/) or another instance you know should always have valid SSL certificates, do not check this box. +1. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button. + * Table will ask you for a Schema and a Table to return all fields of using `SELECT *`. + * SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together and use HarperDB-specific SQL functions, along with the usual power that SQL grants. +1. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue. +1. If there are no errors, you now have a data source you can use in your reports!
You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as create new fields from the report view that do calculations on other fields. + +## Considerations + +* Both Postman and the [HarperDB Studio](../../administration/harperdb-studio/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector’s user. + * You may sign out of your current user by going to the instances tab in HarperDB Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab. +* It’s highly recommended that you create a read-only user role in HarperDB Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it. +* The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set. +* _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake\_case gets around this. diff --git a/site/versioned_docs/version-4.2/developers/components/index.md b/site/versioned_docs/version-4.2/developers/components/index.md new file mode 100644 index 00000000..4901c49f --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/index.md @@ -0,0 +1,38 @@ +--- +title: Components +--- + +# Components + +HarperDB is a highly extensible database application platform with support for a rich variety of composable modular components and extensions that can be used and combined to build applications and add functionality to existing applications. HarperDB tools, components, and add-ons can be found in a few places: + +* [SDK libraries](./sdks) are available for connecting to HarperDB from different languages. +* [Drivers](./drivers) are available for connecting to HarperDB from different products and tools. +* [HarperDB-Add-Ons repositories](https://github.com/orgs/HarperDB-Add-Ons/repositories) list various templates and add-ons for HarperDB. +* [HarperDB repositories](https://github.com/orgs/HarperDB-Add-Ons/repositories) include additional tools for HarperDB. +* You can also [search github.com for an ever-growing list of projects that use, or work with, HarperDB](https://github.com/search?q=harperdb\&type=repositories). +* [Google Data Studio](./google-data-studio) is a visualization tool for building charts and tables from HarperDB data. + +## Components + +There are four general categories of components for HarperDB. The most common is applications. Applications are simply components that deliver complete functionality through an external interface they define, and are usually composed of other components. See [our guide to building applications](../applications/) to get started. + +A data source component can implement the Resource API to customize access to a table or provide access to an external data source. External data source components are used to retrieve and access data from other sources.
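+ +For example, a data source backed by an external REST endpoint might look roughly like the sketch below. It assumes the `Resource` class exposed by HarperDB's Resource API (see the Resource documentation); the class name and endpoint URL are purely illustrative. + +```javascript +// A sketch of an external data source component. +// Assumes the Resource base class from HarperDB's Resource API; +// the endpoint URL and names below are illustrative. +import { Resource } from 'harperdb'; + +export class ThirdPartyAPI extends Resource { + // called when a record is requested from this resource + async get() { + const response = await fetch(`https://some-api.example.com/${this.getId()}`); + return response.json(); + } +} +```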
+ +The next two are considered extension components. Server protocol extension components provide and define ways for clients to access data and can be used to extend or create new protocols. + +Server resource components implement support for different types of files that can be used as resources in applications. HarperDB includes support for using JavaScript modules and GraphQL Schemas as resources, but resource components may add support for different file types like HTML templates (like JSX), CSV data, and more. + +## Server components + +Server components can easily be added and configured by simply adding an entry to your harperdb-config.yaml: + +```yaml +my-server-component: + package: 'HarperDB-Add-Ons/package-name' # this can be any valid github or npm reference + port: 4321 +``` + +## Writing Extension Components + +You can write your own extensions to build new functionality on HarperDB. See the [writing extension components documentation](./writing-extensions) for more information. diff --git a/site/versioned_docs/version-4.2/developers/components/installing.md b/site/versioned_docs/version-4.2/developers/components/installing.md new file mode 100644 index 00000000..aac137ea --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/installing.md @@ -0,0 +1,79 @@ +--- +title: Installing +--- + +# Installing + +Components can be easily added by adding a new top-level element to your `harperdb-config.yaml` file. + +The configuration comprises two values: + +* component name - can be anything, as long as it follows valid YAML syntax. +* package - a reference to your component. + +```yaml +myComponentName: + package: HarperDB-Add-Ons/package +``` + +Under the hood, HarperDB calls npm install on all components; this means that the package value can be any valid npm reference, such as a GitHub repo, an NPM package, a tarball, a local directory or a website. + +```yaml +myGithubComponent: + package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub +myNPMComponent: + package: harperdb # install from NPM +myTarBall: + package: /Users/harper/cool-component.tar # install from tarball +myLocal: + package: /Users/harper/local # install from local path +myWebsite: + package: https://harperdb-component # install from URL +``` + +When HarperDB is run or restarted, it checks to see if there are any new or updated components. If there are, it will dynamically create a package.json file in the `rootPath` directory and call `npm install`. + +NPM will install all the components in `/node_modules`. + +The package.json file that is created will look something like this. + +```json +{ + "dependencies": { + "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0", + "myNPMComponent": "npm:harperdb", + "myTarBall": "file:/Users/harper/cool-component.tar", + "myLocal": "file:/Users/harper/local", + "myWebsite": "https://harperdb-component" + } +} +``` + +The package prefix is automatically added; however, you can manually set it in your package reference. + +```yaml +myCoolComponent: + package: file:/Users/harper/cool-component.tar +``` + +## Installing components using the operations API + +To add a component using the operations API, use the `deploy_component` operation. + +```json +{ + "operation": "deploy_component", + "project": "my-cool-component", + "package": "HarperDB-Add-Ons/package/mycc" +} +``` + +Another option is to pass `deploy_component` a base64-encoded string representation of your component as a `.tar` file. HarperDB can generate this via the `package_component` operation.
When deploying with a payload, your component will be deployed to your `/components` directory. Any components in this directory will be automatically picked up by HarperDB. + +```json +{ + "operation": "deploy_component", + "project": "my-cool-component", + "payload": "NzY1IAAwMDAwMjQgADAwMDAwMDAwMDAwIDE0NDIwMDQ3...." +} +``` diff --git a/site/versioned_docs/version-4.2/developers/components/operations.md b/site/versioned_docs/version-4.2/developers/components/operations.md new file mode 100644 index 00000000..fc5d2bf9 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/operations.md @@ -0,0 +1,37 @@ +--- +title: Operations +--- + +# Operations + +One way to manage applications and components is through [HarperDB Studio](../../administration/harperdb-studio/). It performs all the necessary operations automatically. To get started, navigate to your instance in HarperDB Studio and click the subnav link for “applications”. Once configuration is complete, you can manage and deploy applications in minutes. + +HarperDB Studio manages your applications using nine HarperDB operations. You may view these operations within our [API Docs](../operations-api/). A brief overview of each of the operations is below: + +* **components\_status** + + Returns the state of the applications server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine. +* **get\_components** + + Returns an array of projects within the applications root project directory. +* **get\_component\_file** + + Returns the content of the specified file as text. HarperDB Studio uses this call to render the file content in its built-in code editor. +* **set\_component\_file** + + Updates the content of the specified file. HarperDB Studio uses this call to save any changes made through its built-in code editor. +* **drop\_component\_file** + + Deletes the specified file. +* **add\_component\_project** + + Creates a new project folder in the applications root project directory. It also inserts into the new directory the contents of our applications Project template, which is available publicly, here: https://github.com/HarperDB/harperdb-custom-functions-template. +* **drop\_component\_project** + + Deletes the specified project folder and all of its contents. +* **package\_component\_project** + + Creates a .tar file of the specified project folder, then reads it into a base64-encoded string and returns that string to the user. +* **deploy\_component\_project** + + Takes the output of package\_component\_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the applications root project directory. diff --git a/site/versioned_docs/version-4.2/developers/components/sdks.md b/site/versioned_docs/version-4.2/developers/components/sdks.md new file mode 100644 index 00000000..9064851e --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/sdks.md @@ -0,0 +1,21 @@ +--- +title: SDKs +description: >- + Software Development Kits available for connecting to HarperDB from different + languages.
+--- + +# SDKs + +| SDK/Tool | Description | Installation | +| --- | --- | --- | +| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A .NET Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | +| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A JavaScript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | +| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use HarperDB as the data source for a Gatsby project at build time | `npm i -s gatsby-source-harperdb` | +| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The HarperDB EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` | +| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of HarperDB API functions with wrappers for an object-oriented interface | `pip3 install harperdb` | +| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A HarperDB SDK for Flutter | `flutter pub add harperdb` | +| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | +| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to HarperDB using the Node-RED platform | `npm i -s node-red-contrib-harperdb` | +| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A HarperDB SDK for NodeJS | `npm i -s harperive` | +| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A HarperDB SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` | diff --git a/site/versioned_docs/version-4.2/developers/components/writing-extensions.md b/site/versioned_docs/version-4.2/developers/components/writing-extensions.md new file mode 100644 index 00000000..3a0b0ea1 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/components/writing-extensions.md @@ -0,0 +1,153 @@ +--- +title: Writing Extensions +--- + +# Writing Extensions + +HarperDB is a highly extensible database application platform with support for a rich variety of composable modular components and extensions that can be used and combined to build applications and add functionality to existing applications. Here we describe the different types of components/extensions that can be developed for HarperDB and how to create them. + +There are three general categories of components for HarperDB: + +* **protocol extensions** that provide and define ways for clients to access data +* **resource extensions** that handle and interpret different types of files +* **consumer data sources** that provide a way to access and retrieve data from other sources. + +Server protocol extensions can be used to implement new protocols like MQTT, AMQP, Kafka, or maybe a retro-style Gopher interface. They can also be used to augment existing protocols like HTTP with "middleware" that can add authentication, analytics, or additional content negotiation, or to layer protocols on top of WebSockets. + +Server resource extensions implement support for different types of files that can be used as resources in applications.
HarperDB includes support for using JavaScript modules and GraphQL Schemas as resources, but resource extensions could be added to support different file types like HTML templates (like JSX), CSV data, and more. + +Consumer data source components are used to retrieve and access data from other sources, and can be very useful if you want to use HarperDB to cache or use data from other databases like MySQL, Postgres, or Oracle, or subscribe to data from messaging brokers (again, possibly Kafka, NATS, etc.). + +These are not mutually exclusive; you may build components that fulfill any or all of these roles. + +## Server Extensions + +Server Extensions are implemented as JavaScript packages/modules and interact with HarperDB through a number of possible hooks. A component can be defined as an extension by specifying the extensionModule in the config.yaml: + +```yaml +extensionModule: './entry-module-name.js' +``` + +### Module Initialization + +Once a user has configured an extension, HarperDB will attempt to load the extension package specified by the `package` property. Once loaded, there are several functions that can be exported that will be called by HarperDB: + +`export function start(options: { port: number, server: {}})` If defined, this will be called on the initialization of the extension. The provided `server` property object includes a set of additional entry points for utilizing or layering on top of other protocols (and when implementing a new protocol, you can add your own entry points). The most common entry is to provide an HTTP middleware layer. This looks like: + +```javascript +export function start(options: { port: number, server: {}}) { + options.server.http(async (request, nextLayer) => { + // we can directly return a response here, or do some processing on the request and delegate to the next layer + let response = await nextLayer(request); + return response; + }); +} +``` + +Here, the `request` object will have the following structure (this is based on Node's request, but augmented to conform to a subset of the [WHATWG Request API](https://developer.mozilla.org/en-US/docs/Web/API/Request)): + +```typescript +interface Request { + method: string + headers: Headers // use request.headers.get(headerName) to get header values + body: Stream + data: any // deserialized data from the request body +} +``` + +The returned `response` object should have the following structure (again, following a structural subset of the [WHATWG Response API](https://developer.mozilla.org/en-US/docs/Web/API/Response)): + +```typescript +interface Response { + status?: number + headers?: {} // an object with header name/values + data?: any // object/value that will be serialized into the body + body?: Stream +} +``` + +If you were implementing an authentication extension, you could get authentication information from the request and use it to add the `user` property to the request: + +```javascript +export function start(options: { port: number, server: {}, resources: Map}) { + options.server.http((request, nextLayer) => { + let authorization = request.headers.authorization; + if (authorization) { + // get some token for the user and determine the user + // if we want to use harperdb's user database + let user = server.getUser(username, password); + request.user = user; // authenticated user object goes on the request + } + // continue on to the next layer + return nextLayer(request); + }); + // if you needed to add a login resource, you could add it as well: + resources.set('/login', LoginResource); +} +``` + +If you were implementing a new protocol, you can directly interact with the sockets and listen for new incoming TCP connections: + +```javascript +export function start(options: { port: number, server: {}}) { + options.server.socket((socket) => { + }); +} +``` + +### Resource Handling + +Typically, servers not only communicate with clients, but serve up meaningful data based on the resources within the server. While resource extensions typically handle defining resources, once resources are defined, they can be consumed by server extensions. The `resources` argument provides access to the set of all the resources that have been defined. A server can call `resources.getMatch(path)` to get the resource associated with the URL path. + +## Resource Extensions + +Resource extensions allow us to handle different files and make them accessible to servers as resources, following the common [Resource API](../../technical-details/reference/resource). To implement a resource extension, you export a function called `handleFile`. Users can then configure which files should be handled by your extension. For example, if we had implemented an EJS handler, it could be configured as: + +```yaml + module: 'ejs-extension' + path: '/templates/*.ejs' +``` + +And in our extension module, we could implement `handleFile`: + +```javascript +export function handleFile?(contents, relative_path, file_path, resources) { + // will be called for each .ejs file. + // We can then add the generated resource: + resources.set(relative_path, GeneratedResource); +} +``` + +We can also implement a handler for directories. This can be useful for implementing a handler for broader frameworks that load their own files, like Next.js or Remix, or a static file handler. HarperDB includes such an extension for fastify's auto-loader that loads a directory of route definitions. This hook looks like: + +```javascript +export function handleDirectory?(relative_path, path, resources) { +} +``` + +Note that these hooks are not mutually exclusive. You can write an extension that implements any or all of these hooks, potentially implementing a custom protocol and file handling. + +## Data Source Components + +Data source components implement the Resource interface to provide access to various data sources, which may be other APIs, databases, or local storage. Components that implement this interface can then be used as a source for caching tables, can be accessed as part of endpoint implementations, or even used as endpoints themselves. See the [Resource documentation](../../technical-details/reference/resource) for more information on implementing new resources. + +## Content Type Extensions + +HarperDB uses content negotiation to determine how to deserialize incoming data from HTTP requests (and any other protocols that support content negotiation) and to serialize data into responses. This negotiation is performed by comparing the `Content-Type` header with registered content type handlers to determine how to deserialize content into structured data that is processed and stored, and comparing the `Accept` header with registered content type handlers to determine how to serialize structured data. HarperDB comes with a rich set of content type handlers including JSON, CBOR, MessagePack, CSV, Event-Stream, and more. However, you can also add your own content type handlers by adding new entries (or even replacing existing entries) to the `contentTypes` exported map from the `server` global (or `harperdb` export).
This map is keyed by the MIME type, and the value is an object with the following properties (all optional): + +* `serialize(data): Buffer|Uint8Array|string`: If defined, this will be called with the data structure and should return the data serialized as binary data (a NodeJS Buffer or Uint8Array) or a string, for the response. +* `serializeStream(data): ReadableStream`: If defined, this will be called with the data structure and should return the data serialized as a ReadableStream. This is generally necessary for handling asynchronous iterables. +* `deserialize(Buffer|string): any`: If defined (and deserializeStream is not defined), this will be called with the raw data received from the incoming request and should return the deserialized data structure. This will be called with a string for text MIME types ("text/..."), and a Buffer for all others. +* `deserializeStream(ReadableStream): any`: If defined, this will be called with the raw data stream received from the incoming request and should return the deserialized data structure (potentially as an asynchronous iterable). +* `q: number`: This is an indication of the serialization quality, between 0 and 1; if omitted, it defaults to 1. It is called "content negotiation" instead of "content demanding" because both client and server may have multiple supported content types, and the server needs to choose the best for both. This is determined by finding the content type (of all supported) with the highest product of client q and server q (1 is a perfect representation of the data, 0 is worst, 0.5 is medium quality). + +For example, if you wanted to define an XML serializer (that can respond with XML to requests with `Accept: text/xml`) you could write: + +```javascript +contentTypes.set('text/xml', { + serialize(data) { + return ''; // ... some serialization ... + }, + q: 0.8, +}); +``` + +## Trusted/Untrusted + +Extensions will also be categorized as trusted or untrusted. For some HarperDB installations, administrators may choose to constrain users to only using trusted extensions for security reasons (such as multi-tenancy requirements or added defense in depth). Most installations do not impose such constraints, but this may exist in some situations. + +An extension can be automatically considered trusted if it conforms to the requirements of [Secure EcmaScript](https://www.npmjs.com/package/ses/v/0.7.0) (basically strict-mode code that doesn't modify any global objects), and either does not use any other modules, or only uses modules from other trusted extensions/components. An extension can also be marked as trusted after review by the HarperDB team, but developers should not expect that HarperDB can review all extensions. Untrusted extensions can access any other packages/modules, and may have many additional capabilities. diff --git a/site/versioned_docs/version-4.2/developers/operations-api/advanced-json-sql-examples.md b/site/versioned_docs/version-4.2/developers/operations-api/advanced-json-sql-examples.md new file mode 100644 index 00000000..1584a0c4 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/operations-api/advanced-json-sql-examples.md @@ -0,0 +1,1780 @@ +--- +title: Advanced JSON SQL Examples +--- + +# Advanced JSON SQL Examples + +## Create movies database +Create a new database called "movies" using the `create_database` operation.
+ +_Note: Creating a database is optional; if one is not created, HarperDB will default to using a database named `data`._ + +### Body +```json +{ + "operation": "create_database", + "database": "movies" +} +``` + +### Response: 200 +```json +{ + "message": "database 'movies' successfully created" +} +``` + +--- + +## Create movie Table +Creates a new table called "movie" inside the database "movies" using the `create_table` operation. + +### Body + +```json +{ + "operation": "create_table", + "database": "movies", + "table": "movie", + "primary_key": "id" +} +``` + +### Response: 200 +```json +{ + "message": "table 'movies.movie' successfully created." +} +``` + + +--- + +## Create credits Table +Creates a new table called "credits" inside the database "movies" using the `create_table` operation. + +### Body + +```json +{ + "operation": "create_table", + "database": "movies", + "table": "credits", + "primary_key": "movie_id" +} +``` + +### Response: 200 +```json +{ + "message": "table 'movies.credits' successfully created." +} +``` + + +--- + +## Bulk Insert movie Via CSV +Inserts data from a hosted CSV file into the "movie" table using the `csv_url_load` operation. + +### Body + +```json +{ + "operation": "csv_url_load", + "database": "movies", + "table": "movie", + "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c" +} +``` + + +--- + +## Bulk Insert credits Via CSV +Inserts data from a hosted CSV file into the "credits" table using the `csv_url_load` operation. + +### Body + +```json +{ + "operation": "csv_url_load", + "database": "movies", + "table": "credits", + "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c", + "job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c" +} +``` + + +--- + +## View raw data +In the following example we will be running expressions on the keywords and production_companies attributes, so for context we are displaying what the raw data looks like.
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + { + "id": 305, + "name": "moon" + }, + { + "id": 697, + "name": "loss of loved one" + }, + { + "id": 839, + "name": "planet mars" + }, + { + "id": 14626, + "name": "astronaut" + }, + { + "id": 157265, + "name": "moon colony" + }, + { + "id": 162429, + "name": "solar system" + }, + { + "id": 240119, + "name": "father son relationship" + }, + { + "id": 244256, + "name": "near future" + }, + { + "id": 257878, + "name": "planet neptune" + }, + { + "id": 260089, + "name": "space walk" + } + ], + "production_companies": [ + { + "id": 490, + "name": "New Regency Productions", + "origin_country": "" + }, + { + "id": 79963, + "name": "Keep Your Head", + "origin_country": "" + }, + { + "id": 73492, + "name": "MadRiver Pictures", + "origin_country": "" + }, + { + "id": 81, + "name": "Plan B Entertainment", + "origin_country": "US" + }, + { + "id": 30666, + "name": "RT Features", + "origin_country": "BR" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + { + "id": 3070, + "name": "mercenary" + }, + { + "id": 4110, + "name": "mumbai (bombay), india" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 9730, + "name": "crime boss" + }, + { + "id": 11107, + "name": "rescue mission" + }, + { + "id": 18712, + "name": "based on graphic novel" + }, + { + "id": 265216, + "name": "dhaka (dacca), bangladesh" + } + ], + "production_companies": [ + { + "id": 106544, + "name": "AGBO", + "origin_country": "US" + }, + { + "id": 109172, + "name": "Thematic Entertainment", + "origin_country": "US" + }, + { + "id": 92029, + "name": "TGIM Films", + "origin_country": "US" + } + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + { + "id": 10873, + "name": "school" + } + ], + "production_companies": [] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + { + "id": 2651, + "name": "nanotechnology" + }, + { + "id": 9715, + "name": "superhero" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 164218, + "name": "psychotronic" + }, + { + "id": 255024, + "name": "shared universe" + }, + { + "id": 258575, + "name": "valiant comics" + } + ], + "production_companies": [ + { + "id": 34, + "name": "Sony Pictures", + "origin_country": "US" + }, + { + "id": 10246, + "name": "Cross Creek Pictures", + "origin_country": "US" + }, + { + "id": 6573, + "name": "Mimran Schur Pictures", + "origin_country": "US" + }, + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 103673, + "name": "The Hideaway Entertainment", + "origin_country": "US" + }, + { + "id": 124335, + "name": "Valiant Entertainment", + "origin_country": "US" + }, + { + "id": 5, + "name": "Columbia Pictures", + "origin_country": "US" + }, + { + "id": 1225, + "name": "One Race", + "origin_country": "US" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + } + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + { + "id": 818, + "name": "based on novel or book" + }, + { + "id": 4542, + "name": "gold rush" + }, + { + "id": 15162, + "name": "dog" + }, + { + "id": 155821, + "name": "sled dogs" + }, + { + "id": 189390, + "name": "yukon" + }, + { + "id": 207928, + "name": "19th century" + }, + { + "id": 259987, + "name": "cgi animation" + }, + { + "id": 263806, + "name": "1890s" + } + ], + "production_companies": [ + { + "id": 787, + "name": "3 Arts Entertainment", + "origin_country": "US" + }, + { + "id": 127928, + "name": "20th Century Studios", + "origin_country": "US" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + { + "id": 282, + "name": "video game" + }, + { + "id": 6054, + "name": "friendship" + }, + { + "id": 10842, + "name": "good vs evil" + }, + { + "id": 41645, + "name": "based on video game" + }, + { + "id": 167043, + "name": "road movie" + }, + { + "id": 172142, + "name": "farting" + }, + { + "id": 188933, + "name": "bar fight" + }, + { + "id": 226967, + "name": "amistad" + }, + { + "id": 245230, + "name": "live action remake" + }, + { + "id": 258111, + "name": "fantasy" + }, + { + "id": 260223, + "name": "videojuego" + } + ], + "production_companies": [ + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 10644, + "name": "Blur Studios", + "origin_country": "US" + }, + { + "id": 77884, + "name": "Marza Animation Planet", + "origin_country": "JP" + }, + { + "id": 4, + "name": "Paramount", + "origin_country": "US" + }, + { + "id": 113750, + "name": "SEGA", + "origin_country": "JP" + }, + { + "id": 100711, + "name": "DJ2 Entertainment", + "origin_country": "" + }, + { + "id": 24955, + "name": "Paramount Animation", + "origin_country": "US" + } + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + { + "id": 849, + "name": "dc comics" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 187056, + "name": "woman director" + }, + { + "id": 229266, + "name": "dc extended universe" + } + ], + "production_companies": [ + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": 
"US" + }, + { + "id": 82968, + "name": "LuckyChap Entertainment", + "origin_country": "GB" + }, + { + "id": 103462, + "name": "Kroll & Co Entertainment", + "origin_country": "US" + }, + { + "id": 174, + "name": "Warner Bros. Pictures", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + }, + { + "id": 128064, + "name": "DC Films", + "origin_country": "US" + }, + { + "id": 101831, + "name": "Clubhouse Pictures", + "origin_country": "US" + } + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + { + "id": 849, + "name": "dc comics" + } + ], + "production_companies": [ + { + "id": 2785, + "name": "Warner Bros. Animation", + "origin_country": "US" + }, + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + } + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + { + "id": 1353, + "name": "underground" + }, + { + "id": 5318, + "name": "seoul" + }, + { + "id": 5732, + "name": "birthday party" + }, + { + "id": 5752, + "name": "private lessons" + }, + { + "id": 9866, + "name": "basement" + }, + { + "id": 10453, + "name": "con artist" + }, + { + "id": 11935, + "name": "working class" + }, + { + "id": 12565, + "name": "psychological thriller" + }, + { + "id": 13126, + "name": "limousine driver" + }, + { + "id": 14514, + "name": "class differences" + }, + { + "id": 14864, + "name": "rich poor" + }, + { + "id": 17997, + "name": "housekeeper" + }, + { + "id": 18015, + "name": "tutor" + }, + { + "id": 18035, + "name": "family" + }, + { + "id": 33421, + "name": "crime family" + }, + { + "id": 173272, + "name": "flood" + }, + { + "id": 188861, + "name": "smell" + }, + { + "id": 198673, + "name": "unemployed" + }, + { + "id": 237462, + "name": "wealthy family" + } + ], + "production_companies": [ + { + "id": 7036, + "name": "CJ Entertainment", + "origin_country": "KR" + }, + { + "id": 4399, + "name": "Barunson E&A", + "origin_country": "KR" + } + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + { + "id": 161176, + "name": "space opera" + } + ], + "production_companies": [ + { + "id": 1, + "name": "Lucasfilm", + "origin_country": "US" + }, + { + "id": 11461, + "name": "Bad Robot", + "origin_country": "US" + }, + { + "id": 2, + "name": "Walt Disney Pictures", + "origin_country": "US" + }, + { + "id": 120404, + "name": "British Film Commission", + "origin_country": "" + } + ] + } +] +``` + + +--- + +## Simple search_json call +This query uses search_json to convert the keywords object array to a simple string array. The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + "moon", + "loss of loved one", + "planet mars", + "astronaut", + "moon colony", + "solar system", + "father son relationship", + "near future", + "planet neptune", + "space walk" + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + "mercenary", + "mumbai (bombay), india", + "based on comic", + "crime boss", + "rescue mission", + "based on graphic novel", + "dhaka (dacca), bangladesh" + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + "school" + ] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + "nanotechnology", + "superhero", + "based on comic", + "psychotronic", + "shared universe", + "valiant comics" + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + "based on novel or book", + "gold rush", + "dog", + "sled dogs", + "yukon", + "19th century", + "cgi animation", + "1890s" + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + "video game", + "friendship", + "good vs evil", + "based on video game", + "road movie", + "farting", + "bar fight", + "amistad", + "live action remake", + "fantasy", + "videojuego" + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + "dc comics", + "based on comic", + "woman director", + "dc extended universe" + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + "dc comics" + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + "underground", + "seoul", + "birthday party", + "private lessons", + "basement", + "con artist", + "working class", + "psychological thriller", + "limousine driver", + "class differences", + "rich poor", + "housekeeper", + "tutor", + "family", + "crime family", + "flood", + "smell", + "unemployed", + "wealthy family" + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + "space opera" + ] + } +] +``` + + +--- + +## Use search_json in a where clause +This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, release_date FROM movies.movie where search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" +} +``` + +### Response: 200 +```json +[ + { + "title": "Iron Man", + "release_date": "2008-04-30" + }, + { + "title": "The Incredible Hulk", + "release_date": "2008-06-12" + }, + { + "title": "Iron Man 2", + "release_date": "2010-04-28" + }, + { + "title": "Thor", + "release_date": "2011-04-21" + }, + { + "title": "Captain America: The First Avenger", + "release_date": "2011-07-22" + }, + { + "title": "Marvel One-Shot: The Consultant", + "release_date": "2011-09-12" + }, + { + "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", + "release_date": "2011-10-25" + }, + { + "title": "The Avengers", + "release_date": "2012-04-25" + }, + { + "title": "Marvel One-Shot: Item 47", + "release_date": "2012-09-13" + }, + { + "title": "Iron Man 3", + "release_date": "2013-04-18" + }, + { + "title": "Marvel One-Shot: Agent Carter", + "release_date": "2013-09-08" + }, + { + "title": "Thor: The Dark World", + "release_date": "2013-10-29" + }, + { + "title": "Marvel One-Shot: All Hail the King", + "release_date": "2014-02-04" + }, + { + "title": "Marvel Studios: Assembling a Universe", + "release_date": "2014-03-18" + }, + { + "title": "Captain America: The Winter Soldier", + "release_date": "2014-03-20" + }, + { + "title": "Guardians of the Galaxy", + "release_date": "2014-07-30" + }, + { + "title": "Avengers: Age of Ultron", + "release_date": "2015-04-22" + }, + { + "title": "Ant-Man", + "release_date": "2015-07-14" + }, + { + "title": "Captain America: Civil War", + "release_date": "2016-04-27" + }, + { + "title": "Team Thor", + "release_date": "2016-08-28" + }, + { + "title": "Doctor Strange", + "release_date": "2016-10-25" + }, + { + "title": "Guardians of the Galaxy Vol. 2", + "release_date": "2017-04-19" + }, + { + "title": "Spider-Man: Homecoming", + "release_date": "2017-07-05" + }, + { + "title": "Thor: Ragnarok", + "release_date": "2017-10-25" + }, + { + "title": "Black Panther", + "release_date": "2018-02-13" + }, + { + "title": "Avengers: Infinity War", + "release_date": "2018-04-25" + }, + { + "title": "Ant-Man and the Wasp", + "release_date": "2018-07-04" + }, + { + "title": "Captain Marvel", + "release_date": "2019-03-06" + }, + { + "title": "Avengers: Endgame", + "release_date": "2019-04-24" + }, + { + "title": "Spider-Man: Far from Home", + "release_date": "2019-06-28" + }, + { + "title": "Black Widow", + "release_date": "2020-10-28" + }, + { + "title": "Untitled Spider-Man 3", + "release_date": "2021-11-04" + }, + { + "title": "Thor: Love and Thunder", + "release_date": "2022-02-10" + }, + { + "title": "Doctor Strange in the Multiverse of Madness", + "release_date": "2022-03-23" + }, + { + "title": "Untitled Marvel Project (3)", + "release_date": "2022-07-29" + }, + { + "title": "Guardians of the Galaxy Vol. 3", + "release_date": "2023-02-16" + } +] +``` + + +--- + +## Use search_json to show the movies with the largest casts +This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the results. The cast attribute holds an object array of details around the cast of a movie. We use the expression '$count(id)', which counts each id and returns the total; we alias this in SQL as cast_size, which in turn is used to sort the rows.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "movie_title": "Around the World in Eighty Days",
+    "cast_size": 312
+  },
+  {
+    "movie_title": "And the Oscar Goes To...",
+    "cast_size": 259
+  },
+  {
+    "movie_title": "Rock of Ages",
+    "cast_size": 223
+  },
+  {
+    "movie_title": "Mr. Smith Goes to Washington",
+    "cast_size": 213
+  },
+  {
+    "movie_title": "Les Misérables",
+    "cast_size": 208
+  },
+  {
+    "movie_title": "Jason Bourne",
+    "cast_size": 201
+  },
+  {
+    "movie_title": "The Muppets",
+    "cast_size": 191
+  },
+  {
+    "movie_title": "You Don't Mess with the Zohan",
+    "cast_size": 183
+  },
+  {
+    "movie_title": "The Irishman",
+    "cast_size": 173
+  },
+  {
+    "movie_title": "Spider-Man: Far from Home",
+    "cast_size": 173
+  }
+]
+```
+
+
+---
+
+## search_json as a condition, in a select with a table join
+This example shows how we can use SEARCH_JSON to find movies where at least 2 of our favorite actors from Marvel films have acted together, then list each movie, its overview, its release date, and the actors' names and their characters. The WHERE clause counts the entries in the credits.cast attribute that match those actors. The SELECT performs the same filter on the cast attribute and transforms each matching object to return just the actor's name and their character.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "title": "Out of Sight",
+    "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.",
+    "release_date": "1998-06-26",
+    "characters": [
+      {
+        "actor": "Don Cheadle",
+        "character": "Maurice Miller"
+      },
+      {
+        "actor": "Samuel L. Jackson",
+        "character": "Hejira Henry (uncredited)"
+      }
+    ]
+  },
+  {
+    "title": "Iron Man",
+    "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.",
+    "release_date": "2008-04-30",
+    "characters": [
+      {
+        "actor": "Robert Downey Jr.",
+        "character": "Tony Stark / Iron Man"
+      },
+      {
+        "actor": "Gwyneth Paltrow",
+        "character": "Virginia \"Pepper\" Potts"
+      },
+      {
+        "actor": "Clark Gregg",
+        "character": "Phil Coulson"
+      },
+      {
+        "actor": "Samuel L. Jackson",
+        "character": "Nick Fury (uncredited)"
+      },
+      {
+        "actor": "Samuel L.
Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Captain America: The First Avenger", + "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", + "release_date": "2011-07-22", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "In Good Company", + "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", + "release_date": "2004-12-29", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Alex Foreman" + }, + { + "actor": "Clark Gregg", + "character": "Mark Steckle" + } + ] + }, + { + "title": "Zodiac", + "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", + "release_date": "2007-03-02", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Dave Toschi" + }, + { + "actor": "Robert Downey Jr.", + "character": "Paul Avery" + } + ] + }, + { + "title": "Hard Eight", + "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", + "release_date": "1996-02-28", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Clementine" + }, + { + "actor": "Samuel L. Jackson", + "character": "Jimmy" + } + ] + }, + { + "title": "The Spirit", + "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", + "release_date": "2008-12-25", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Silken Floss" + }, + { + "actor": "Samuel L. Jackson", + "character": "Octopuss" + } + ] + }, + { + "title": "S.W.A.T.", + "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", + "release_date": "2003-08-08", + "characters": [ + { + "actor": "Samuel L. Jackson", + "character": "Sgt. 
Dan 'Hondo' Harrelson" + }, + { + "actor": "Jeremy Renner", + "character": "Brian Gamble" + } + ] + }, + { + "title": "Iron Man 2", + "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", + "release_date": "2010-04-28", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Scarlett Johansson", + "character": "Natalie Rushman / Natasha Romanoff / Black Widow" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + } + ] + }, + { + "title": "Thor", + "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", + "release_date": "2011-04-21", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + } + ] + }, + { + "title": "View from the Top", + "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", + "release_date": "2003-03-21", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Donna" + }, + { + "actor": "Mark Ruffalo", + "character": "Ted Stewart" + } + ] + }, + { + "title": "The Nanny Diaries", + "overview": "A college graduate goes to work as a nanny for a rich New York family. Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", + "release_date": "2007-08-24", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Annie Braddock" + }, + { + "actor": "Chris Evans", + "character": "Hayden \"Harvard Hottie\"" + } + ] + }, + { + "title": "The Perfect Score", + "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", + "release_date": "2004-01-30", + "characters": [ + { + "actor": "Chris Evans", + "character": "Kyle" + }, + { + "actor": "Scarlett Johansson", + "character": "Francesca Curtis" + } + ] + }, + { + "title": "The Avengers", + "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. 
Spanning the globe, a daring recruitment effort begins!", + "release_date": "2012-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + } + ] + }, + { + "title": "Iron Man 3", + "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", + "release_date": "2013-04-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / Iron Patriot" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner (uncredited)" + } + ] + }, + { + "title": "Marvel One-Shot: The Consultant", + "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", + "release_date": "2011-09-12", + "characters": [ + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark (archive footage)" + } + ] + }, + { + "title": "Thor: The Dark World", + "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", + "release_date": "2013-10-29", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Loki as Captain America (uncredited)" + } + ] + }, + { + "title": "Avengers: Age of Ultron", + "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. 
As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", + "release_date": "2015-04-22", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + } + ] + }, + { + "title": "Captain America: The Winter Soldier", + "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", + "release_date": "2014-03-20", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + } + ] + }, + { + "title": "Thanks for Sharing", + "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", + "release_date": "2013-09-19", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Adam" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Phoebe" + } + ] + }, + { + "title": "Chef", + "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. 
Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", + "release_date": "2014-05-08", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Molly" + }, + { + "actor": "Robert Downey Jr.", + "character": "Marvin" + } + ] + }, + { + "title": "Marvel Studios: Assembling a Universe", + "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", + "release_date": "2014-03-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Himself / Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Himself / Thor" + }, + { + "actor": "Chris Evans", + "character": "Himself / Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Himself / Bruce Banner / Hulk" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Herself" + }, + { + "actor": "Clark Gregg", + "character": "Himself" + }, + { + "actor": "Samuel L. Jackson", + "character": "Himself" + }, + { + "actor": "Scarlett Johansson", + "character": "Herself" + }, + { + "actor": "Jeremy Renner", + "character": "Himself" + } + ] + }, + { + "title": "Captain America: Civil War", + "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", + "release_date": "2016-04-27", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + } + ] + }, + { + "title": "Thor: Ragnarok", + "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", + "release_date": "2017-10-25", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" + } + ] + }, + { + "title": "Avengers: Endgame", + "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. 
With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", + "release_date": "2019-04-24", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Don Cheadle", + "character": "James Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Pepper Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Avengers: Infinity War", + "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", + "release_date": "2018-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } + ] + }, + { + "title": "Captain Marvel", + "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", + "release_date": "2019-03-06", + "characters": [ + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Agent Phil Coulson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America (uncredited)" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (uncredited)" + }, + { + "actor": "Don Cheadle", + "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk (uncredited)" + } + ] + }, + { + "title": "Spider-Man: Homecoming", + "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", + "release_date": "2017-07-05", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + } + ] + }, + { + "title": "Team Thor", + "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", + "release_date": "2016-08-28", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner" + } + ] + }, + { + "title": "Black Widow", + "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", + "release_date": "2020-10-28", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + } + ] + } +] +``` diff --git a/site/versioned_docs/version-4.2/developers/operations-api/bulk-operations.md b/site/versioned_docs/version-4.2/developers/operations-api/bulk-operations.md new file mode 100644 index 00000000..048ec5d4 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/operations-api/bulk-operations.md @@ -0,0 +1,136 @@ +--- +title: Bulk Operations +--- + +# Bulk Operations + +## CSV Data Load +Ingests CSV data, provided directly in the operation as an `insert`, `update` or `upsert` into the specified database table. + +* operation _(required)_ - must always be `csv_data_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. 
The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* data _(required)_ - CSV data to import into HarperDB
+
+### Body
+```json
+{
+  "operation": "csv_data_load",
+  "database": "dev",
+  "action": "insert",
+  "table": "breed",
+  "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69",
+  "job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69"
+}
+```
+
+---
+
+## CSV File Load
+Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table.
+
+_Note: The CSV file must reside on the same machine on which HarperDB is running. For example, the path to a CSV on your computer will produce an error if your HarperDB instance is a cloud instance._
+
+* operation _(required)_ - must always be `csv_file_load`
+* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert`
+* database _(optional)_ - name of the database where you are loading your data. The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* file_path _(required)_ - path to the CSV file on the host running HarperDB
+
+### Body
+```json
+{
+  "operation": "csv_file_load",
+  "action": "insert",
+  "database": "dev",
+  "table": "breed",
+  "file_path": "/home/user/imports/breeds.csv"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e",
+  "job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e"
+}
+```
+
+---
+
+## CSV URL Load
+Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into the specified database table.
+
+* operation _(required)_ - must always be `csv_url_load`
+* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert`
+* database _(optional)_ - name of the database where you are loading your data. The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* csv_url _(required)_ - URL to the CSV
+
+### Body
+```json
+{
+  "operation": "csv_url_load",
+  "action": "insert",
+  "database": "dev",
+  "table": "breed",
+  "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a",
+  "job_id": "332aa0a2-6833-46cd-88a6-ae375920436a"
+}
+```
+
+---
+
+## Import from S3
+This operation allows users to import CSV or JSON files from an AWS S3 bucket as an `insert`, `update` or `upsert`.
+
+* operation _(required)_ - must always be `import_from_s3`
+* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert`
+* database _(optional)_ - name of the database where you are loading your data.
The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* s3 _(required)_ - object containing required AWS S3 bucket info for operation:
+  * aws_access_key_id - AWS access key for authenticating into your S3 bucket
+  * aws_secret_access_key - AWS secret for authenticating into your S3 bucket
+  * bucket - AWS S3 bucket to import from
+  * key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_
+  * region - the region of the bucket
+
+### Body
+```json
+{
+  "operation": "import_from_s3",
+  "action": "insert",
+  "database": "dev",
+  "table": "dog",
+  "s3": {
+    "aws_access_key_id": "YOUR_KEY",
+    "aws_secret_access_key": "YOUR_SECRET_KEY",
+    "bucket": "BUCKET_NAME",
+    "key": "OBJECT_NAME",
+    "region": "BUCKET_REGION"
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16",
+  "job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16"
+}
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/clustering.md b/site/versioned_docs/version-4.2/developers/operations-api/clustering.md
new file mode 100644
index 00000000..bb7c0632
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/clustering.md
@@ -0,0 +1,390 @@
+---
+title: Clustering
+---
+
+# Clustering
+
+## Cluster Set Routes
+Adds a route/routes to either the hub or leaf server cluster configuration.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_set_routes`
+* server _(required)_ - must always be `hub` or `leaf`, in most cases you should use `hub` here
+* routes _(required)_ - must be an object array, where each object contains a host and port:
+  * host - the host of the remote instance you are clustering to
+  * port - the clustering port of the remote instance you are clustering to, in most cases this is the value of `clustering.hubServer.cluster.network.port` in the remote instance's `harperdb-config.yaml`
+
+### Body
+```json
+{
+  "operation": "cluster_set_routes",
+  "server": "hub",
+  "routes": [
+    {
+      "host": "3.22.181.22",
+      "port": 12345
+    },
+    {
+      "host": "3.137.184.8",
+      "port": 12345
+    },
+    {
+      "host": "18.223.239.195",
+      "port": 12345
+    },
+    {
+      "host": "18.116.24.71",
+      "port": 12345
+    }
+  ]
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "cluster routes successfully set",
+  "set": [
+    {
+      "host": "3.22.181.22",
+      "port": 12345
+    },
+    {
+      "host": "3.137.184.8",
+      "port": 12345
+    },
+    {
+      "host": "18.223.239.195",
+      "port": 12345
+    },
+    {
+      "host": "18.116.24.71",
+      "port": 12345
+    }
+  ],
+  "skipped": []
+}
+```
+
+---
+
+## Cluster Get Routes
+Gets all the hub and leaf server routes from the config file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_get_routes`
+
+### Body
+```json
+{
+  "operation": "cluster_get_routes"
+}
+```
+
+### Response: 200
+```json
+{
+  "hub": [
+    {
+      "host": "3.22.181.22",
+      "port": 12345
+    },
+    {
+      "host": "3.137.184.8",
+      "port": 12345
+    },
+    {
+      "host": "18.223.239.195",
+      "port": 12345
+    },
+    {
+      "host": "18.116.24.71",
+      "port": 12345
+    }
+  ],
+  "leaf": []
+}
+```
+
+---
+
+## Cluster Delete Routes
+Removes route(s) from the hub and/or leaf server routes array in the config file. Returns a deletion success message and arrays of deleted and skipped records.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_delete_routes`
+* routes _(required)_ - must be an array of route object(s)
+
+### Body
+
+```json
+{
+  "operation": "cluster_delete_routes",
+  "routes": [
+    {
+      "host": "18.116.24.71",
+      "port": 12345
+    }
+  ]
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "cluster routes successfully deleted",
+  "deleted": [
+    {
+      "host": "18.116.24.71",
+      "port": 12345
+    }
+  ],
+  "skipped": []
+}
+```
+
+
+---
+
+## Add Node
+Registers an additional HarperDB instance with associated subscriptions. Learn more about HarperDB clustering here: https://harperdb.io/docs/clustering/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_node`
+* node_name _(required)_ - the node name of the remote node
+* subscriptions _(required)_ - the relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
+  * schema - the schema to replicate from
+  * table - the table to replicate from
+  * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+  * publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+  * start_time _(optional)_ - how far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format
+
+### Body
+```json
+{
+  "operation": "add_node",
+  "node_name": "ec2-3-22-181-22",
+  "subscriptions": [
+    {
+      "schema": "dev",
+      "table": "dog",
+      "subscribe": false,
+      "publish": true,
+      "start_time": "2022-09-02T20:06:35.993Z"
+    }
+  ]
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Successfully added 'ec2-3-22-181-22' to manifest"
+}
+```
+
+---
+
+## Update Node
+Modifies an existing HarperDB instance registration and associated subscriptions. Learn more about HarperDB clustering here: https://harperdb.io/docs/clustering/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `update_node`
+* node_name _(required)_ - the node name of the remote node you are updating
+* subscriptions _(required)_ - the relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
+  * schema - the schema to replicate from
+  * table - the table to replicate from
+  * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+  * publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+
+### Body
+```json
+{
+  "operation": "update_node",
+  "node_name": "ec2-18-223-239-195",
+  "subscriptions": [
+    {
+      "schema": "dev",
+      "table": "dog",
+      "subscribe": true,
+      "publish": false
+    }
+  ]
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Successfully updated 'ec2-18-223-239-195'"
+}
+```
+
+---
+
+## Cluster Status
+Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about HarperDB clustering here: https://harperdb.io/docs/clustering/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_status`
+
+### Body
+```json
+{
+  "operation": "cluster_status"
+}
+```
+
+### Response: 200
+```json
+{
+  "node_name": "ec2-18-221-143-69",
+  "is_enabled": true,
+  "connections": [
+    {
+      "node_name": "ec2-3-22-181-22",
+      "status": "open",
+      "ports": {
+        "clustering": 12345,
+        "operations_api": 9925
+      },
+      "latency_ms": 13,
+      "uptime": "30d 1h 18m 8s",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "publish": true,
+          "subscribe": true
+        }
+      ]
+    }
+  ]
+}
+```
+
+
+---
+
+## Cluster Network
+Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with and the routes set in its config file. Learn more about HarperDB clustering here: [https://harperdb.io/docs/clustering/](https://harperdb.io/docs/clustering/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_network`
+* timeout _(optional)_ - the amount of time in milliseconds to wait for a response from the network. Must be a number
+* connected_nodes _(optional)_ - set to `true` to omit `connected_nodes` from the response. Must be a boolean. Defaults to `false`
+* routes _(optional)_ - set to `true` to omit `routes` from the response. Must be a boolean. Defaults to `false`
+
+### Body
+
+```json
+{
+  "operation": "cluster_network"
+}
+```
+
+### Response: 200
+```json
+{
+  "nodes": [
+    {
+      "name": "local_node",
+      "response_time": 4,
+      "connected_nodes": ["ec2-3-142-255-78"],
+      "routes": [
+        {
+          "host": "3.142.255.78",
+          "port": 9932
+        }
+      ]
+    },
+    {
+      "name": "ec2-3-142-255-78",
+      "response_time": 57,
+      "connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"],
+      "routes": []
+    }
+  ]
+}
+```
+
+---
+
+## Remove Node
+Removes a HarperDB instance and associated subscriptions from the cluster. Learn more about HarperDB clustering here: https://harperdb.io/docs/clustering/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `remove_node`
+* node_name _(required)_ - the name of the node you are de-registering
+
+### Body
+```json
+{
+  "operation": "remove_node",
+  "node_name": "ec2-3-22-181-22"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Successfully removed 'ec2-3-22-181-22' from manifest"
+}
+```
+
+---
+
+## Configure Cluster
+Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup.
+Learn more about HarperDB clustering here: https://harperdb.io/docs/clustering/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `configure_cluster`
+* connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node
+
+### Body
+```json
+{
+  "operation": "configure_cluster",
+  "connections": [
+    {
+      "node_name": "ec2-3-137-184-8",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "subscribe": true,
+          "publish": false
+        }
+      ]
+    },
+    {
+      "node_name": "ec2-18-223-239-195",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "subscribe": true,
+          "publish": true
+        }
+      ]
+    }
+  ]
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Cluster successfully configured."
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/components.md b/site/versioned_docs/version-4.2/developers/operations-api/components.md
new file mode 100644
index 00000000..17ba5f0a
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/components.md
@@ -0,0 +1,291 @@
+---
+title: Components
+---
+
+# Components
+
+## Add Component
+
+Creates a new component project in the component root directory using a predefined template.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_component`
+* project _(required)_ - the name of the project you wish to create
+
+### Body
+```json
+{
+  "operation": "add_component",
+  "project": "my-component"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Successfully added project: my-component"
+}
+```
+---
+## Deploy Component
+
+Deploys a component using either a base64-encoded string representation of a `.tar` file (the output from `package_component`) or a package value, which can be any valid NPM reference, such as a GitHub repo, an NPM package, a tarball, a local directory or a website.\
+
+If deploying with the `payload` option, HarperDB will decode the base64-encoded string, reconstitute the .tar file of your project folder, and extract it to the component root project directory.\
+
+If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`. Then npm install will be utilized to install the component in the `node_modules` directory located in the hdb root. The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM registered packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers. Here is an example URL package reference to a (public) Git repository that doesn't require authentication:
+```
+https://github.com/HarperDB/application-template#semver:v1.0.0
+```
+or this can be shortened to:
+```
+HarperDB/application-template#semver:v1.0.0
+```
+
+You can also install from a private repository if you have SSH keys installed on the server:
+```
+git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0
+```
+Or you can use a GitHub token:
+```
+https://@github.com/my-org/my-app#semver:v1.0.0
+```
+Or you can use a GitLab Project Access Token:
+```
+https://my-project:@gitlab.com/my-group/my-project#semver:v1.0.0
+```
+Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node_modules in the hdb root directory (typically `~/hdb/node_modules`).
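+
+For example, a deploy_component request that pins the public application-template repository to a specific version with the `semver` directive might look like this (the project name is illustrative):
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "package": "HarperDB/application-template#semver:v1.0.0"
+}
+```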
+
+_Note: After deploying a component, a restart may be required_
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `deploy_component`
+* project _(required)_ - the name of the project you wish to deploy
+* package _(optional)_ - this can be any valid GitHub or NPM reference
+* payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+### Body
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "package": "HarperDB/application-template"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deployed: my-component"
+}
+```
+---
+## Package Component
+
+Creates a temporary `.tar` file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name and the base64 payload.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `package_component`
+* project _(required)_ - the name of the project you wish to package
+* skip_node_modules _(optional)_ - if true, the project's node_modules directory will be excluded from the tar file. Must be a boolean
+
+### Body
+
+```json
+{
+  "operation": "package_component",
+  "project": "my-component",
+  "skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "project": "my-component",
+  "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
+}
+```
+---
+## Drop Component
+
+Deletes a file from inside the component project or deletes the complete project.
+
+**If just `project` is provided, it will delete all of that project's local files and folders**
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_component`
+* project _(required)_ - the name of the project you wish to delete or to delete from if using the `file` parameter
+* file _(optional)_ - the path relative to your project folder of the file you wish to delete
+
+### Body
+
+```json
+{
+  "operation": "drop_component",
+  "project": "my-component",
+  "file": "utils/myUtils.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully dropped: my-component/utils/myUtils.js"
+}
+```
+---
+## Get Components
+
+Gets all local component files and folders and any component config from `harperdb-config.yaml`
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_components`
+
+### Body
+
+```json
+{
+  "operation": "get_components"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "name": "components",
+  "entries": [
+    {
+      "package": "HarperDB/application-template",
+      "name": "deploy-test-gh"
+    },
+    {
+      "package": "@fastify/compress",
+      "name": "fast-compress"
+    },
+    {
+      "name": "my-component",
+      "entries": [
+        {
+          "name": "LICENSE",
+          "mtime": "2023-08-22T16:00:40.286Z",
+          "size": 1070
+        },
+        {
+          "name": "index.md",
+          "mtime": "2023-08-22T16:00:40.287Z",
+          "size": 1207
+        },
+        {
+          "name": "config.yaml",
+          "mtime": "2023-08-22T16:00:40.287Z",
+          "size": 1069
+        },
+        {
+          "name": "package.json",
+          "mtime": "2023-08-22T16:00:40.288Z",
+          "size": 145
+        },
+        {
+          "name": "resources.js",
+          "mtime": "2023-08-22T16:00:40.289Z",
+          "size": 583
+        },
+        {
+          "name": "schema.graphql",
+          "mtime": "2023-08-22T16:00:40.289Z",
+          "size": 466
+        },
+        {
+          "name": "utils",
+          "entries": [
+            {
"commonUtils.js", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 583 + } + ] + } + ] + } + ] +} +``` +--- +## Get Component File + +Gets the contents of a file inside a component project. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_component_file` +* project _(required)_ - the name of the project where the file is located +* file _(required)_ - the path relative to your project folder of the file you wish to view +* encoding _(optional)_ - the encoding that will be passed to the read file call. Defaults to `utf8` + +### Body + +```json +{ + "operation": "get_component_file", + "project": "my-component", + "file": "resources.js" +} +``` + +### Response: 200 + +```json +{ + "message": "/**export class MyCustomResource extends tables.TableName {\n\t/ we can define our own custom POST handler\n\tpost(content) {\n\t\t/ do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t/ or custom GET handler\n\tget() {\n\t\t/ we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n/ we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t/ a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}" +} +``` +--- +## Set Component File + +Creates or updates a file inside a component project. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_component_file` +* project _(required)_ - the name of the project the file is located in +* file _(required)_ - the path relative to your project folder of the file you wish to set +* payload _(required)_ - what will be written to the file +* encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8` + +### Body + +```json +{ + "operation": "set_component_file", + "project": "my-component", + "file": "test.js", + "payload": "console.log('hello world')" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully set component: test.js" +} +``` diff --git a/site/versioned_docs/version-4.2/developers/operations-api/custom-functions.md b/site/versioned_docs/version-4.2/developers/operations-api/custom-functions.md new file mode 100644 index 00000000..bf9537fc --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/operations-api/custom-functions.md @@ -0,0 +1,276 @@ +--- +title: Custom Functions +--- + +# Custom Functions + +## Custom Functions Status + +Returns the state of the Custom functions server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `custom_function_status` + +### Body +```json +{ + "operation": "custom_functions_status" +} +``` + +### Response: 200 +```json +{ + "is_enabled": true, + "port": 9926, + "directory": "/Users/myuser/hdb/custom_functions" +} +``` + +--- + +## Get Custom Functions + +Returns an array of projects within the Custom Functions root project directory. Each project has details including each of the files in the routes and helpers directories, and the total file count in the static folder. 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_custom_functions` + +### Body + +```json +{ + "operation": "get_custom_functions" +} +``` + +### Response: 200 + +```json +{ + "dogs": { + "routes": ["examples"], + "helpers":["example"], + "static":3 + } +} +``` + +--- + +## Get Custom Function + +Returns the content of the specified file as text. HarperDB Studio uses this call to render the file content in its built-in code editor. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_custom_function` +* project _(required)_ - the name of the project containing the file for which you wish to get content +* type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers +* file _(required)_ - The name of the file for which you wish to get content - should not include the file extension (which is always .js) + +### Body + +```json +{ + "operation": "get_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example" +} +``` + +### Response: 200 + +```json +{ + "message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" +} +``` + +--- + +## Set Custom Function + +Updates the content of the specified file. HarperDB Studio uses this call to save any changes made through its built-in code editor. 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_custom_function` +* project _(required)_ - the name of the project containing the file for which you wish to set content +* type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers +* file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js) +* function_content _(required)_ - the content you wish to save into the specified file + +### Body + +```json +{ + "operation": "set_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example", + "function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully updated custom function: example.js" +} +``` + +--- + +## Drop Custom Function + +Deletes the specified file. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `drop_custom_function` +* project _(required)_ - the name of the project containing the file you wish to delete +* type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers +* file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js) + +### Body + +```json +{ + "operation": "drop_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example" +} +``` + +### Response: 200 + +```json +{ + "message":"Successfully deleted custom function: example.js" +} +``` + +--- + +## Add Custom Function Project + +Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is available publicly, here: https:/github.com/HarperDB/harperdb-custom-functions-template. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_custom_function_project` +* project _(required)_ - the name of the project you wish to create + +### Body + +```json +{ + "operation": "add_custom_function_project", + "project": "dogs" +} +``` + +### Response: 200 + +```json +{ + "message":"Successfully created custom function project: dogs" +} +``` + +--- + +## Drop Custom Function Project + +Deletes the specified project folder and all of its contents. 
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function_project`
+* project _(required)_ - the name of the project you wish to delete
+
+### Body
+
+```json
+{
+  "operation": "drop_custom_function_project",
+  "project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deleted project: dogs"
+}
+```
+
+---
+
+## Package Custom Function Project
+
+Creates a .tar file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name, the base64 payload, and the path to the temporary file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `package_custom_function_project`
+* project _(required)_ - the name of the project you wish to package up for deployment
+* skip_node_modules _(optional)_ - if true, the project's node_modules directory will be excluded from the tar file. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "package_custom_function_project",
+  "project": "dogs",
+  "skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "project": "dogs",
+  "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
+  "file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar"
+}
+```
+
+---
+
+## Deploy Custom Function Project
+
+Takes the output of package_custom_function_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the Custom Functions root project directory.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `deploy_custom_function_project`
+* project _(required)_ - the name of the project you wish to deploy. Must be a string
+* payload _(required)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+
+### Body
+
+```json
+{
+  "operation": "deploy_custom_function_project",
+  "project": "dogs",
+  "payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deployed project: dogs"
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/databases-and-tables.md b/site/versioned_docs/version-4.2/developers/operations-api/databases-and-tables.md
new file mode 100644
index 00000000..18f23171
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/databases-and-tables.md
@@ -0,0 +1,362 @@
+---
+title: Databases and Tables
+---
+
+# Databases and Tables
+
+## Describe All
+Returns the definitions of all databases and tables within the database instance. Record counts above 5000 records are estimated, as determining the exact count can be expensive. When the record count is estimated, this is indicated by the inclusion of a confidence interval of `estimated_record_range`. If you need the exact count, you can include `"exact_count": true` in the operation, but be aware that this requires a full table scan (which may be expensive).
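+
+For example, to request an exact count using the `exact_count` option described above:
+
+```json
+{
+  "operation": "describe_all",
+  "exact_count": true
+}
+```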
+ +* operation _(required)_ - must always be `describe_all` + +### Body +```json +{ + "operation": "describe_all" +} +``` + +### Response: 200 +```json +{ + "dev": { + "dog": { + "schema": "dev", + "name": "dog", + "hash_attribute": "id", + "audit": true, + "schema_defined": false, + "attributes": [ + { + "attribute": "id", + "indexed": true, + "is_primary_key": true + }, + { + "attribute": "__createdtime__", + "indexed": true + }, + { + "attribute": "__updatedtime__", + "indexed": true + }, + { + "attribute": "type", + "indexed": true + } + ], + "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", + "record_count": 4000, + "estimated_record_range": [3976, 4033], + "last_updated_record": 1697658683698.4504 + } + } +} +``` + +--- + +## Describe database +Returns the definitions of all tables within the specified database. + +* operation _(required)_ - must always be `describe_database` +* database _(optional)_ - database where the table you wish to describe lives. The default is `data` + +### Body +```json +{ + "operation": "describe_database", + "database": "dev" +} +``` + +### Response: 200 +```json +{ + "dog": { + "schema": "dev", + "name": "dog", + "hash_attribute": "id", + "audit": true, + "schema_defined": false, + "attributes": [ + { + "attribute": "id", + "indexed": true, + "is_primary_key": true + }, + { + "attribute": "__createdtime__", + "indexed": true + }, + { + "attribute": "__updatedtime__", + "indexed": true + }, + { + "attribute": "type", + "indexed": true + } + ], + "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", + "record_count": 4000, + "estimated_record_range": [3976, 4033], + "last_updated_record": 1697658683698.4504 + } +} +``` + +--- + +## Describe Table +Returns the definition of the specified table. + +* operation _(required)_ - must always be `describe_table` +* table _(required)_ - table you wish to describe +* database _(optional)_ - database where the table you wish to describe lives. The default is `data` + +### Body +```json +{ + "operation": "describe_table", + "table": "dog" +} +``` + +### Response: 200 +```json +{ + "schema": "dev", + "name": "dog", + "hash_attribute": "id", + "audit": true, + "schema_defined": false, + "attributes": [ + { + "attribute": "id", + "indexed": true, + "is_primary_key": true + }, + { + "attribute": "__createdtime__", + "indexed": true + }, + { + "attribute": "__updatedtime__", + "indexed": true + }, + { + "attribute": "type", + "indexed": true + } + ], + "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff", + "record_count": 4000, + "estimated_record_range": [3976, 4033], + "last_updated_record": 1697658683698.4504 +} +``` + +--- + +## Create database +Create a new database. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `create_database` +* database _(optional)_ - name of the database you are creating. The default is `data` + +### Body +```json +{ + "operation": "create_database", + "database": "dev" +} +``` + +### Response: 200 +```json +{ + "message": "database 'dev' successfully created" +} +``` + +--- + +## Drop database +Drop an existing database. NOTE: Dropping a database will delete all tables and all of their records in that database. 
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_database`
+* database _(required)_ - name of the database you are dropping
+
+### Body
+```json
+{
+  "operation": "drop_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "successfully deleted 'dev'"
+}
+```
+
+---
+
+## Create Table
+Create a new table within a database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `create_table`
+* database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided it will default to `data`.
+* table _(required)_ - name of the table you are creating
+* primary_key _(required)_ - primary key for the table
+* attributes _(optional)_ - an array of attributes that specifies the schema for the table, i.e. the set of attributes the table allows. When attributes are supplied the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted. Each attribute is specified as:
+  * name _(required)_ - the name of the attribute
+  * indexed _(optional)_ - indicates if the attribute should be indexed
+  * type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any)
+* expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted (records are not evicted on any timer if not specified). This is specified in seconds.
+
+### Body
+```json
+{
+  "operation": "create_table",
+  "database": "dev",
+  "table": "dog",
+  "primary_key": "id"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "table 'dev.dog' successfully created."
+}
+```
+
+---
+
+## Drop Table
+Drop an existing database table. NOTE: Dropping a table will delete all associated records in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_table`
+* database _(optional)_ - database where the table you are dropping lives. The default is `data`
+* table _(required)_ - name of the table you are dropping
+
+### Body
+
+```json
+{
+  "operation": "drop_table",
+  "database": "dev",
+  "table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "successfully deleted table 'dev.dog'"
+}
+```
+
+---
+
+## Create Attribute
+Create a new attribute within the specified table. **The create_attribute operation can be used by admins wishing to pre-define schema values for setting role-based permissions or for any other reason.**
+
+_Note: HarperDB will automatically create new attributes on insert and update if they do not already exist within the schema._
+
+* operation _(required)_ - must always be `create_attribute`
+* database _(optional)_ - name of the database containing the table you want to add your attribute to. The default is `data`
+* table _(required)_ - name of the table you want to add your attribute to
+* attribute _(required)_ - name for the attribute
+
+### Body
+```json
+{
+  "operation": "create_attribute",
+  "database": "dev",
+  "table": "dog",
+  "attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "inserted 1 of 1 records",
+  "skipped_hashes": [],
+  "inserted_hashes": [
+    "383c0bef-5781-4e1c-b5c8-987459ad0831"
+  ]
+}
+```
+
+---
+
+## Drop Attribute
+Drop an existing attribute from the specified table. NOTE: Dropping an attribute will delete all associated attribute values in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_attribute`
+* database _(optional)_ - database where the table containing the attribute lives. The default is `data`
+* table _(required)_ - table where the attribute you are dropping lives
+* attribute _(required)_ - attribute that you intend to drop
+
+### Body
+
+```json
+{
+  "operation": "drop_attribute",
+  "database": "dev",
+  "table": "dog",
+  "attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "successfully deleted attribute 'is_adorable'"
+}
+```
+
+---
+
+## Get Backup
+This will return a snapshot of the requested database. This provides a means for backing up the database through the operations API. The response will be the raw database file (in binary format), which can later be restored as a database file by copying it into the appropriate hdb/databases directory (with HarperDB not running). The returned file is a snapshot of the database at the moment in time that the get_backup operation begins. This also supports backing up individual tables in a database. However, this is a more expensive operation than backing up a database in whole, and will lose any transactional atomicity between writes across tables, so generally it is recommended that you back up the entire database.
+
+It is important to note that trying to copy a database file that is in use (HarperDB actively running and writing to the file) using standard file copying tools is not safe (the copied file will likely be corrupt), which is why using this snapshot operation is recommended for backups (volume snapshots are also a good way to back up HarperDB databases).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `get_backup`
+* database _(required)_ - this is the database that will be snapshotted and returned
+* table _(optional)_ - this will specify a specific table to back up
+* tables _(optional)_ - this will specify a specific set of tables to back up
+
+### Body
+
+```json
+{
+  "operation": "get_backup",
+  "database": "dev"
+}
+```
+
+### Response: 200
+```
+The database in raw binary data format
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/index.md b/site/versioned_docs/version-4.2/developers/operations-api/index.md
new file mode 100644
index 00000000..cf2db22d
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/index.md
@@ -0,0 +1,51 @@
+---
+title: Operations API
+---
+
+# Operations API
+
+The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling HarperDB. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../../deployments/configuration), on the root path, where the body is the operation object. These requests need to be authenticated, which can be done with [basic auth](../../../developers/security/basic-auth) or [JWT authentication](../../../developers/security/jwt-auth).
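+
+The value after `Basic` is the base64 encoding of `username:password` for an instance user. As a quick sketch (placeholder credentials, using the standard `base64` utility):
+
+```bash
+# Encode placeholder credentials for the Authorization header
+echo -n 'HDB_ADMIN:password' | base64
+# SERCX0FETUlOOnBhc3N3b3Jk
+```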
For example, a request to create a table would be performed as:
+
+```http
+POST http://my-harperdb-server:9925/
+Authorization: Basic YourBase64EncodedInstanceUser:Pass
+Content-Type: application/json
+
+{
+  "operation": "create_table",
+  "table": "my-table"
+}
+```
+
+The operations API reference is available below and categorized by topic:
+
+* [Quick Start Examples](./quickstart-examples)
+* [Databases and Tables](./databases-and-tables)
+* [NoSQL Operations](./nosql-operations)
+* [Bulk Operations](./bulk-operations)
+* [Users and Roles](./users-and-roles)
+* [Clustering](./clustering)
+* [Components](./components)
+* [Registration](./registration)
+* [Jobs](./jobs)
+* [Logs](./logs)
+* [Utilities](./utilities)
+* [Token Authentication](./token-authentication)
+* [SQL Operations](./sql-operations)
+* [Advanced JSON SQL Examples](./advanced-json-sql-examples)
+* [Past Release API Documentation](https://olddocs.harperdb.io)
+
+## More Examples
+
+Here is an example of using `curl` to make an operations API request:
+
+```bash
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+"operation": "create_schema",
+"schema": "dev"
+}'
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/jobs.md b/site/versioned_docs/version-4.2/developers/operations-api/jobs.md
new file mode 100644
index 00000000..8b05357f
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/jobs.md
@@ -0,0 +1,82 @@
+---
+title: Jobs
+---
+
+# Jobs
+
+## Get Job
+Returns job status, metrics, and messages for the specified job ID.
+
+* operation _(required)_ - must always be `get_job`
+* id _(required)_ - the id of the job you wish to view
+
+### Body
+
+```json
+{
+  "operation": "get_job",
+  "id": "4a982782-929a-4507-8794-26dae1132def"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "__createdtime__": 1611615798782,
+    "__updatedtime__": 1611615801207,
+    "created_datetime": 1611615798774,
+    "end_datetime": 1611615801206,
+    "id": "4a982782-929a-4507-8794-26dae1132def",
+    "job_body": null,
+    "message": "successfully loaded 350 of 350 records",
+    "start_datetime": 1611615798805,
+    "status": "COMPLETE",
+    "type": "csv_url_load",
+    "user": "HDB_ADMIN",
+    "start_datetime_converted": "2021-01-25T23:03:18.805Z",
+    "end_datetime_converted": "2021-01-25T23:03:21.206Z"
+  }
+]
+```
+
+---
+
+## Search Jobs By Start Date
+Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `search_jobs_by_start_date`
+* from_date _(required)_ - the date you wish to start the search
+* to_date _(required)_ - the date you wish to end the search
+
+### Body
+```json
+{
+  "operation": "search_jobs_by_start_date",
+  "from_date": "2021-01-25T22:05:27.464+0000",
+  "to_date": "2021-01-25T23:05:27.464+0000"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1",
+    "user": "HDB_ADMIN",
+    "type": "csv_url_load",
+    "status": "COMPLETE",
+    "start_datetime": 1611613284781,
+    "end_datetime": 1611613287204,
+    "job_body": null,
+    "message": "successfully loaded 350 of 350 records",
+    "created_datetime": 1611613284764,
+    "__createdtime__": 1611613284767,
+    "__updatedtime__": 1611613287207,
+    "start_datetime_converted": "2021-01-25T22:21:24.781Z",
+    "end_datetime_converted": "2021-01-25T22:21:27.204Z"
+  }
+]
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/logs.md b/site/versioned_docs/version-4.2/developers/operations-api/logs.md
new file mode 100644
index 00000000..3da8a570
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/logs.md
@@ -0,0 +1,753 @@
+---
+title: Logs
+---
+
+# Logs
+
+## Read HarperDB Log
+Returns log outputs from the primary HarperDB log based on the provided search criteria. Read more about HarperDB logging here: https://docs.harperdb.io/docs/logging#read-logs-via-the-api.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `read_log`
+* start _(optional)_ - result to start with. Must be a number
+* limit _(optional)_ - number of results returned. Default behavior is 100. Must be a number
+* level _(optional)_ - error level to filter on. Default behavior is all levels. Must be `error`, `info`, or `null`
+* from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`
+* until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`
+* order _(optional)_ - order in which to display logs, `desc` or `asc`, by timestamp
+
+### Body
+
+```json
+{
+  "operation": "read_log",
+  "start": 0,
+  "limit": 1000,
+  "level": "error",
+  "from": "2021-01-25T22:05:27.464+0000",
+  "until": "2021-01-25T23:05:27.464+0000",
+  "order": "desc"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "level": "notify",
+    "message": "Connected to cluster server.",
+    "timestamp": "2021-01-25T23:03:20.710Z",
+    "thread": "main/0",
+    "tags": []
+  },
+  {
+    "level": "warn",
+    "message": "Login failed",
+    "timestamp": "2021-01-25T22:24:45.113Z",
+    "thread": "http/9",
+    "tags": []
+  },
+  {
+    "level": "error",
+    "message": "unknown attribute 'name and breed'",
+    "timestamp": "2021-01-25T22:23:24.167Z",
+    "thread": "http/9",
+    "tags": []
+  }
+]
+```
+
+---
+
+## Read Transaction Log
+Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. Read more about HarperDB transaction logs here: https://docs.harperdb.io/docs/transaction-logging#read_transaction_log.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_transaction_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* from _(optional)_ - time format must be millisecond-based epoch in UTC +* to _(optional)_ - time format must be millisecond-based epoch in UTC +* limit _(optional)_ - max number of logs you want to receive. Must be a number + +### Body + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1560249020865, + "to": 1660585656639, + "limit": 10 +} +``` + +### Response: 200 +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619813, + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 
1660165619798,
+        "__createdtime__": 1660165619798
+      }
+    ]
+  },
+  {
+    "operation": "update",
+    "user": "admin",
+    "timestamp": 1660165620040,
+    "records": [
+      {
+        "id": 1,
+        "dog_name": "Penny B",
+        "__updatedtime__": 1660165620036
+      }
+    ]
+  }
+]
+```
+
+---
+
+## Delete Transaction Logs Before
+Deletes transaction log data for the specified database table that is older than the specified timestamp.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `delete_transaction_logs_before`
+* schema _(required)_ - schema under which the transaction log resides. Must be a string
+* table _(required)_ - table under which the transaction log resides. Must be a string
+* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC
+
+### Body
+```json
+{
+  "operation": "delete_transaction_logs_before",
+  "schema": "dev",
+  "table": "dog",
+  "timestamp": 1598290282817
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126"
+}
+```
+
+---
+
+## Read Audit Log
+AuditLog must be enabled in the HarperDB configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search_type and search_values fields. Read more about HarperDB transaction logs here: https://docs.harperdb.io/docs/transaction-logging#read_audit_log.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `read_audit_log`
+* schema _(required)_ - schema under which the transaction log resides
+* table _(required)_ - table under which the transaction log resides
+* search_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username`
+* search_values _(optional)_ - an array of strings or numbers relating to search_type
+
+### Body
+
+```json
+{
+  "operation": "read_audit_log",
+  "schema": "dev",
+  "table": "dog"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "operation": "insert",
+    "user_name": "admin",
+    "timestamp": 1660585635882.288,
+    "hash_values": [
+      318
+    ],
+    "records": [
+      {
+        "id": 318,
+        "dog_name": "Polliwog",
+        "__updatedtime__": 1660585635876,
+        "__createdtime__": 1660585635876
+      }
+    ]
+  },
+  {
+    "operation": "insert",
+    "user_name": "admin",
+    "timestamp": 1660585716133.01,
+    "hash_values": [
+      444
+    ],
+    "records": [
+      {
+        "id": 444,
+        "dog_name": "Davis",
+        "__updatedtime__": 1660585716128,
+        "__createdtime__": 1660585716128
+      }
+    ]
+  },
+  {
+    "operation": "update",
+    "user_name": "admin",
+    "timestamp": 1660585740558.415,
+    "hash_values": [
+      444
+    ],
+    "records": [
+      {
+        "id": 444,
+        "fur_type": "coarse",
+        "__updatedtime__": 1660585740556
+      }
+    ],
+    "original_records": [
+      {
+        "id": 444,
+        "dog_name": "Davis",
+        "__updatedtime__": 1660585716128,
+        "__createdtime__": 1660585716128
+      }
+    ]
+  },
+  {
+    "operation": "delete",
+    "user_name": "admin",
+    "timestamp": 1660585759710.56,
+    "hash_values": [
+      444
+    ],
+    "original_records": [
+      {
+        "id": 444,
+        "dog_name": "Davis",
+        "__updatedtime__": 1660585740556,
+        "__createdtime__": 1660585716128,
+        "fur_type": "coarse"
+      }
+    ]
+  }
+]
+```
+
+---
+
+## Read Audit Log by timestamp
+AuditLog must be enabled in the HarperDB configuration file to make this request. Returns the transactions logged for the specified database table within the specified time window.
Read more about HarperDB transaction logs here: https:/docs.harperdb.io/docs/transaction-logging#read_audit_log. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - timestamp +* search_values _(optional)_ - an array containing a maximum of two values [`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. + * Timestamp format is millisecond-based epoch in UTC + * If no items are supplied then all transactions are returned + * If only one entry is supplied then all transactions after the supplied timestamp will be returned + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558, + 1660585759710.56 + ] +} +``` + +### Response: 200 +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } +] +``` + + +--- + +## Read Audit Log by username +AuditLog must be enabled in the HarperDB configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. Read more about HarperDB transaction logs here: https:/docs.harperdb.io/docs/transaction-logging#read_audit_log. 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - username +* search_values _(optional)_ - the HarperDB user for whom you would like to view transactions + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +### Response: 200 +```json +{ + "admin": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + + +--- + +## Read Audit Log by hash_value +AuditLog must be enabled in the HarperDB configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). Read more about HarperDB transaction logs here: https:/docs.harperdb.io/docs/transaction-logging#read_audit_log. 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - hash_value +* search_values _(optional)_ - an array of hash_attributes for which you wish to see transaction logs + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +### Response: 200 +```json +{ + "318": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +--- + +## Delete Audit Logs Before +AuditLog must be enabled in the HarperDB configuration file to make this request. Deletes audit log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `delete_audit_logs_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1660585759710.56 +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" +} +``` diff --git a/site/versioned_docs/version-4.2/developers/operations-api/nosql-operations.md b/site/versioned_docs/version-4.2/developers/operations-api/nosql-operations.md new file mode 100644 index 00000000..d27e0c95 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/operations-api/nosql-operations.md @@ -0,0 +1,390 @@ +--- +title: NoSQL Operations +--- + +# NoSQL Operations + +## Insert + +Adds one or more rows of data to a database table. Primary keys of the inserted JSON record may be supplied on insert. If a primary key is not provided, then a GUID will be generated for each record. + +* operation _(required)_ - must always be `insert` +* database _(optional)_ - database where the table you are inserting records into lives. The default is `data` +* table _(required)_ - table where you want to insert records +* records _(required)_ - array of one or more records for insert + +### Body + +```json +{ + "operation": "insert", + "database": "dev", + "table": "dog", + "records": [ + { + "id": 8, + "dog_name": "Harper", + "breed_id": 346, + "age": 7 + }, + { + "id": 9, + "dog_name": "Penny", + "breed_id": 154, + "age": 7 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 2 of 2 records", + "inserted_hashes": [ + 8, + 9 + ], + "skipped_hashes": [] +} +``` + +--- + +## Update + +Changes the values of specified attributes in one or more rows in a database table as identified by the primary key. 
NOTE: Primary key of the updated JSON record(s) MUST be supplied on update.
+
+* operation _(required)_ - must always be `update`
+* database _(optional)_ - database of the table you are updating records in. The default is `data`
+* table _(required)_ - table where you want to update records
+* records _(required)_ - array of one or more records for update
+
+### Body
+
+```json
+{
+  "operation": "update",
+  "database": "dev",
+  "table": "dog",
+  "records": [
+    {
+      "id": 1,
+      "weight_lbs": 55
+    },
+    {
+      "id": 2,
+      "owner": "Kyle B",
+      "weight_lbs": 35
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "updated 2 of 2 records",
+  "update_hashes": [
+    1,
+    2
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Upsert
+
+Changes the values of specified attributes for rows with matching primary keys that exist in the table. Adds rows to the database table for primary keys that do not exist or are not provided.
+
+* operation _(required)_ - must always be `upsert`
+* database _(optional)_ - database of the table you are upserting records in. The default is `data`
+* table _(required)_ - table where you want to upsert records
+* records _(required)_ - array of one or more records for upsert
+
+### Body
+
+```json
+{
+  "operation": "upsert",
+  "database": "dev",
+  "table": "dog",
+  "records": [
+    {
+      "id": 8,
+      "weight_lbs": 155
+    },
+    {
+      "name": "Bill",
+      "breed": "Pit Bull",
+      "id": 10,
+      "Age": 11,
+      "weight_lbs": 155
+    },
+    {
+      "name": "Harper",
+      "breed": "Mutt",
+      "age": 5,
+      "weight_lbs": 155
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "upserted 3 of 3 records",
+  "upserted_hashes": [
+    8,
+    10,
+    "ea06fc8e-717b-4c6c-b69d-b29014054ab7"
+  ]
+}
+```
+
+---
+
+## Delete
+
+Removes one or more rows of data from a specified table.
+
+* operation _(required)_ - must always be `delete`
+* database _(optional)_ - database where the table you are deleting records from lives. The default is `data`
+* table _(required)_ - table where you want to delete records
+* ids _(required)_ - array of one or more primary key values, which identifies the records to delete
+
+### Body
+
+```json
+{
+  "operation": "delete",
+  "database": "dev",
+  "table": "dog",
+  "ids": [
+    1,
+    2
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "2 of 2 records successfully deleted",
+  "deleted_hashes": [
+    1,
+    2
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Search By ID
+
+Returns data from a table for one or more primary keys.
+
+* operation _(required)_ - must always be `search_by_id`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* ids _(required)_ - array of primary keys to retrieve
+* get_attributes _(required)_ - define which attributes you want returned. _Use `['*']` to return all attributes_
+
+### Body
+
+```json
+{
+  "operation": "search_by_id",
+  "database": "dev",
+  "table": "dog",
+  "ids": [
+    1,
+    2
+  ],
+  "get_attributes": [
+    "dog_name",
+    "breed_id"
+  ]
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "dog_name": "Penny",
+    "breed_id": 154
+  },
+  {
+    "dog_name": "Harper",
+    "breed_id": 346
+  }
+]
+```
+
+---
+
+## Search By Value
+
+Returns data from a table for a matching value.
+
+* operation _(required)_ - must always be `search_by_value`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* search_attribute _(required)_ - attribute you wish to search; can be any attribute
+* search_value _(required)_ - value you wish to search for; wildcards are allowed
+* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+
+### Body
+
+```json
+{
+  "operation": "search_by_value",
+  "database": "dev",
+  "table": "dog",
+  "search_attribute": "owner_name",
+  "search_value": "Ky*",
+  "get_attributes": [
+    "id",
+    "dog_name"
+  ]
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "dog_name": "Penny"
+  },
+  {
+    "dog_name": "Kato"
+  }
+]
+```
+
+---
+
+## Search By Conditions
+
+Returns data from a table for one or more matching conditions.
+
+* operation _(required)_ - must always be `search_by_conditions`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+* offset _(optional)_ - the number of records that the query results will skip. The default is `0`
+* limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit
+* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+* conditions _(required)_ - the array of condition objects, specified below, to filter by. Must include one or more objects in the array
+  * search_attribute _(required)_ - the attribute you wish to search, can be any attribute
+  * search_type _(required)_ - the type of search to perform - `equals`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between`
+  * search_value _(required)_ - case-sensitive value you wish to search. If the `search_type` is `between` then use an array of two values to search between
+
+### Body
+
+```json
+{
+  "operation": "search_by_conditions",
+  "database": "dev",
+  "table": "dog",
+  "operator": "and",
+  "offset": 0,
+  "limit": 10,
+  "get_attributes": [
+    "*"
+  ],
+  "conditions": [
+    {
+      "search_attribute": "age",
+      "search_type": "between",
+      "search_value": [
+        5,
+        8
+      ]
+    },
+    {
+      "search_attribute": "weight_lbs",
+      "search_type": "greater_than",
+      "search_value": 40
+    },
+    {
+      "search_attribute": "adorable",
+      "search_type": "equals",
+      "search_value": true
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "__createdtime__": 1620227719791,
+    "__updatedtime__": 1620227719791,
+    "adorable": true,
+    "age": 7,
+    "breed_id": 346,
+    "dog_name": "Harper",
+    "id": 2,
+    "owner_name": "Stephen",
+    "weight_lbs": 55
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 7,
+    "breed_id": 348,
+    "dog_name": "Alby",
+    "id": 3,
+    "owner_name": "Kaylan",
+    "weight_lbs": 84
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 6,
+    "breed_id": 347,
+    "dog_name": "Billy",
+    "id": 4,
+    "owner_name": "Zach",
+    "weight_lbs": 60
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 5,
+    "breed_id": 250,
+    "dog_name": "Gemma",
+    "id": 8,
+    "owner_name": "Stephen",
+    "weight_lbs": 55
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 8,
+    "breed_id": 104,
+    "dog_name": "Bode",
+    "id": 11,
+    "owner_name": "Margo",
+    "weight_lbs": 75
+  }
+]
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/quickstart-examples.md b/site/versioned_docs/version-4.2/developers/operations-api/quickstart-examples.md
new file mode 100644
index 00000000..e74b7979
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/quickstart-examples.md
@@ -0,0 +1,385 @@
+---
+title: Quick Start Examples
+---
+
+# Quick Start Examples
+
+## Create dog Table
+
+We first need to create a table. Since our company is named after our CEO's dog, let's create a table to store all our employees' dogs. We'll call this table `dog`.
+
+Tables in HarperDB are schema-less, so we don't need to add any attributes other than a primary_key (in pre-4.2 versions this was referred to as the hash_attribute) to create this table. A hash attribute is an attribute that defines the unique identifier for each row in your table. In a traditional RDBMS this would be called a primary key.
+
+HarperDB does offer a `database` parameter that can be used to hold logical groupings of tables. The parameter is optional and if not provided the operation will default to using a database named `data`.
+
+If you receive an error response, make sure your Basic Authentication user and password match those you entered during the installation process.
+
+### Body
+
+```json
+{
+  "operation": "create_table",
+  "table": "dog",
+  "primary_key": "id"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "table 'data.dog' successfully created."
+}
+```
+
+---
+
+## Create breed Table
+Now that we have a table to store our dog data, we also want to create a table to track known breeds. Just as with the dog table, the only attribute we need to specify is the `primary_key`.
+ +### Body + +```json +{ + "operation": "create_table", + "table": "breed", + "primary_key": "id" +} +``` + +### Response: 200 + +```json +{ + "message": "table 'data.breed' successfully created." +} +``` + +--- + +## Insert 1 Dog + +We're ready to add some dog data. Penny is our CTO's pup, so she gets ID 1 or we're all fired. We are specifying attributes in this call, but this doesn't prevent us from specifying additional attributes in subsequent calls. + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Insert Multiple Dogs + +Let's add some more Harper doggies! We can add as many dog objects as we want into the records collection. If you're adding a lot of objects, we would recommend using the .csv upload option (see the next section where we populate the breed table). + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 12 of 12 records", + "inserted_hashes": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "skipped_hashes": [] +} +``` + +--- + +## Bulk Insert Breeds Via CSV + +We need to populate the 'breed' table with some data so we can reference it later. For larger data sets, we recommend using our CSV upload option. + +Each header in a column will be considered as an attribute, and each row in the file will be a row in the table. Simply specify the file path and the table to upload to, and HarperDB will take care of the rest. 
You can pull the breeds.csv file from here: https://s3.amazonaws.com/complimentarydata/breeds.csv
+
+### Body
+
+```json
+{
+  "operation": "csv_url_load",
+  "table": "breed",
+  "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369",
+  "job_id": "e77d63b9-70d5-499c-960f-6736718a4369"
+}
+```
+
+---
+
+## Update 1 Dog Using NoSQL
+
+HarperDB supports NoSQL and SQL commands. We're going to update the dog table to show Penny's last initial using our NoSQL API.
+
+### Body
+
+```json
+{
+  "operation": "update",
+  "table": "dog",
+  "records": [
+    {
+      "id": 1,
+      "dog_name": "Penny B"
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "updated 1 of 1 records",
+  "update_hashes": [
+    1
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Select a Dog by ID Using SQL
+
+Now we're going to use a simple SQL SELECT call to pull Penny's updated data. Note we now see Penny's last initial in the dog name.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT * FROM data.dog where id = 1"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "owner_name": "Kyle",
+    "adorable": null,
+    "breed_id": 154,
+    "__updatedtime__": 1610749428575,
+    "dog_name": "Penny B",
+    "weight_lbs": 38,
+    "id": 1,
+    "age": 7,
+    "__createdtime__": 1610749386566
+  }
+]
+```
+
+---
+
+## Select Dogs and Join Breed
+
+Here's a more complex SQL command joining the breed table with the dog table. We will also pull only the pups belonging to Kyle, Zach, and Stephen.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "id": 4,
+    "dog_name": "Billy",
+    "owner_name": "Zach",
+    "name": "LABRADOR / GREAT DANE MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 8,
+    "dog_name": "Gemma",
+    "owner_name": "Stephen",
+    "name": "SHORT HAIRED SETTER MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 2,
+    "dog_name": "Harper",
+    "owner_name": "Stephen",
+    "name": "HUSKY MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 5,
+    "dog_name": "Rose Merry",
+    "owner_name": "Zach",
+    "name": "TERRIER MIX",
+    "section": "Mutt"
+  }
+]
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/registration.md b/site/versioned_docs/version-4.2/developers/operations-api/registration.md
new file mode 100644
index 00000000..53d953af
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/registration.md
@@ -0,0 +1,67 @@
+---
+title: Registration
+---
+
+# Registration
+
+## Registration Info
+Returns the registration data of the HarperDB instance.
+
+* operation _(required)_ - must always be `registration_info`
+
+### Body
+```json
+{
+  "operation": "registration_info"
+}
+```
+
+### Response: 200
+```json
+{
+  "registered": true,
+  "version": "4.2.0",
+  "ram_allocation": 2048,
+  "license_expiration_date": "2022-01-15"
+}
+```
+
+---
+
+## Get Fingerprint
+Returns the HarperDB fingerprint, uniquely generated based on the machine, for licensing purposes.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_fingerprint`
+
+### Body
+
+```json
+{
+  "operation": "get_fingerprint"
+}
+```
+
+---
+
+## Set License
+Sets the HarperDB license as generated by HarperDB License Management software.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_license`
+* key _(required)_ - your license key
+* company _(required)_ - the company that was used in the license
+
+### Body
+
+```json
+{
+  "operation": "set_license",
+  "key": "",
+  "company": ""
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/sql-operations.md b/site/versioned_docs/version-4.2/developers/operations-api/sql-operations.md
new file mode 100644
index 00000000..39259083
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/sql-operations.md
@@ -0,0 +1,118 @@
+---
+title: SQL Operations
+---
+
+# SQL Operations
+
+## Select
+Executes the provided SQL statement. The SELECT statement is used to query data from the database.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT * FROM dev.dog WHERE id = 1"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "id": 1,
+    "age": 7,
+    "dog_name": "Penny",
+    "weight_lbs": 38,
+    "breed_id": 154,
+    "owner_name": "Kyle",
+    "adorable": true,
+    "__createdtime__": 1611614106043,
+    "__updatedtime__": 1611614119507
+  }
+]
+```
+
+---
+
+## Insert
+Executes the provided SQL statement. The INSERT statement is used to add one or more rows to a database table.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "INSERT INTO dev.dog (id, dog_name) VALUES (22, 'Simon')"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "inserted 1 of 1 records",
+  "inserted_hashes": [
+    22
+  ],
+  "skipped_hashes": []
+}
+```
+---
+
+## Update
+Executes the provided SQL statement. The UPDATE statement is used to change the values of specified attributes in one or more rows in a database table.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+```json
+{
+  "operation": "sql",
+  "sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "updated 1 of 1 records",
+  "update_hashes": [
+    1
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Delete
+Executes the provided SQL statement. The DELETE statement is used to remove one or more rows of data from a database table.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+```json
+{
+  "operation": "sql",
+  "sql": "DELETE FROM dev.dog WHERE id = 1"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "1 of 1 record successfully deleted",
+  "deleted_hashes": [
+    1
+  ],
+  "skipped_hashes": []
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/token-authentication.md b/site/versioned_docs/version-4.2/developers/operations-api/token-authentication.md
new file mode 100644
index 00000000..161c69b5
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/token-authentication.md
@@ -0,0 +1,54 @@
+---
+title: Token Authentication
+---
+
+# Token Authentication
+
+## Create Authentication Tokens
+Creates the tokens needed for authentication: an operation token & a refresh token.
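+
+Once issued, the operation token is typically sent on subsequent requests in place of basic auth credentials; with [JWT authentication](../../../developers/security/jwt-auth) this is passed as a Bearer token. A minimal sketch (hypothetical host, placeholder token):
+
+```http
+POST http://my-harperdb-server:9925/
+Authorization: Bearer <operation_token>
+Content-Type: application/json
+
+{
+  "operation": "user_info"
+}
+```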
+ +_Note - this operation does not require authorization to be set_ + +* operation _(required)_ - must always be `create_authentication_tokens` +* username _(required)_ - username of user to generate tokens for +* password _(required)_ - password of user to generate tokens for + +### Body +```json +{ + "operation": "create_authentication_tokens", + "username": "", + "password": "" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" +} +``` + +--- + +## Refresh Operation Token +This operation creates a new operation token. 
+ +* operation _(required)_ - must always be `refresh_operation_token` +* refresh_token _(required)_ - the refresh token that was provided when tokens were created + +### Body +```json +{ + "operation": "refresh_operation_token", + "refresh_token": "EXISTING_REFRESH_TOKEN" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-sEUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg" +} +``` diff --git a/site/versioned_docs/version-4.2/developers/operations-api/users-and-roles.md b/site/versioned_docs/version-4.2/developers/operations-api/users-and-roles.md new file mode 100644 index 00000000..59b33a51 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/operations-api/users-and-roles.md @@ -0,0 +1,484 @@ +--- +title: Users and Roles +--- + +# Users and Roles + +## List Roles +Returns a list of all roles. Learn more about HarperDB roles here: https:/harperdb.io/docs/security/users-roles/. 
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `list_roles`
+
+### Body
+```json
+{
+  "operation": "list_roles"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "__createdtime__": 1611615061106,
+    "__updatedtime__": 1611615061106,
+    "id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890",
+    "permission": {
+      "super_user": false,
+      "dev": {
+        "tables": {
+          "dog": {
+            "read": true,
+            "insert": true,
+            "update": true,
+            "delete": false,
+            "attribute_permissions": [
+              {
+                "attribute_name": "name",
+                "read": true,
+                "insert": true,
+                "update": true
+              }
+            ]
+          }
+        }
+      }
+    },
+    "role": "developer"
+  },
+  {
+    "__createdtime__": 1610749235614,
+    "__updatedtime__": 1610749235614,
+    "id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564",
+    "permission": {
+      "cluster_user": true
+    },
+    "role": "cluster_user"
+  },
+  {
+    "__createdtime__": 1610749235609,
+    "__updatedtime__": 1610749235609,
+    "id": "745b3138-a7cf-455a-8256-ac03722eef12",
+    "permission": {
+      "super_user": true
+    },
+    "role": "super_user"
+  }
+]
+```
+
+---
+
+## Add Role
+Creates a new role with the specified permissions. Learn more about HarperDB roles here: [https://harperdb.io/docs/security/users-roles/](https://harperdb.io/docs/security/users-roles/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_role`
+* role _(required)_ - name of role you are defining
+* permission _(required)_ - object defining permissions for users associated with this role:
+  * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false.
+  * structure_user _(optional)_ - boolean OR array of schema names (as strings). If boolean, user can create new schemas and tables. If array of strings, users can only manage tables within the specified schemas. This overrides any individual table permissions for specified schemas, or for all schemas if the value is true.
+
+### Body
+```json
+{
+  "operation": "add_role",
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  },
+  "id": "0a9368b0-bd81-482f-9f5a-8722e3582f96",
+  "__updatedtime__": 1598549532897,
+  "__createdtime__": 1598549532897
+}
+```
+
+---
+
+## Alter Role
+Modifies an existing role, updating its permissions. Learn more about HarperDB roles here: [https://harperdb.io/docs/security/users-roles/](https://harperdb.io/docs/security/users-roles/).
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `alter_role` +* id _(required)_ - the id value for the role you are altering +* role _(optional)_ - name value to update on the role you are altering +* permission _(required)_ - object defining permissions for users associated with this role: + * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. + * structure_user (optional) - boolean OR array of schema names (as strings). If boolean, user can create new schemas and tables. If array of strings, users can only manage tables within the specified schemas. This overrides any individual table permissions for specified schemas, or for all schemas if the value is true. + +### Body + +```json +{ + "operation": "alter_role", + "id": "f92162e2-cd17-450c-aae0-372a76859038", + "role": "another_developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + } +} +``` + +### Response: 200 +```json +{ + "id": "a7cb91e9-32e4-4dbf-a327-fab4fa9191ea", + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + }, + "__updatedtime__": 1598549996106 +} +``` + +--- + +## Drop Role +Deletes an existing role from the database. NOTE: Role with associated users cannot be dropped. Learn more about HarperDB roles here: https:/harperdb.io/docs/security/users-roles/. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - this must always be `drop_role` +* id _(required)_ - this is the id of the role you are dropping + +### Body +```json +{ + "operation": "drop_role", + "id": "2ebc3415-0aa0-4eea-9b8e-40860b436119" +} +``` + +### Response: 200 +```json +{ + "message": "developer successfully deleted" +} +``` + +--- + +## List Users +Returns a list of all users. Learn more about HarperDB users here: https:/harperdb.io/docs/security/users-roles/. 
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `list_users`
+
+### Body
+```json
+{
+  "operation": "list_users"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "__createdtime__": 1635520961165,
+    "__updatedtime__": 1635520961165,
+    "active": true,
+    "role": {
+      "__createdtime__": 1635520961161,
+      "__updatedtime__": 1635520961161,
+      "id": "7c78ef13-c1f3-4063-8ea3-725127a78279",
+      "permission": {
+        "super_user": true,
+        "system": {
+          "tables": {
+            "hdb_table": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_attribute": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_schema": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_user": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_role": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_job": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_license": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_info": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_nodes": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            },
+            "hdb_temp": {
+              "read": true,
+              "insert": false,
+              "update": false,
+              "delete": false,
+              "attribute_permissions": []
+            }
+          }
+        }
+      },
+      "role": "super_user"
+    },
+    "username": "HDB_ADMIN"
+  }
+]
+```
+
+---
+
+## User Info
+Returns user data for the user associated with the request credentials.
+
+* operation _(required)_ - must always be `user_info`
+
+### Body
+```json
+{
+  "operation": "user_info"
+}
+```
+
+### Response: 200
+```json
+{
+  "__createdtime__": 1610749235611,
+  "__updatedtime__": 1610749235611,
+  "active": true,
+  "role": {
+    "__createdtime__": 1610749235609,
+    "__updatedtime__": 1610749235609,
+    "id": "745b3138-a7cf-455a-8256-ac03722eef12",
+    "permission": {
+      "super_user": true
+    },
+    "role": "super_user"
+  },
+  "username": "HDB_ADMIN"
+}
+```
+
+---
+
+## Add User
+Creates a new user with the specified role and credentials. Learn more about HarperDB users here: https://harperdb.io/docs/security/users-roles/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_user`
+* role _(required)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail
+* username _(required)_ - username assigned to the user. It cannot be altered after adding the user, and it serves as the user record's hash (primary key)
+* password _(required)_ - clear text for the password. HarperDB will encrypt the password upon receipt
+* active _(required)_ - boolean value for the status of the user's access to your HarperDB instance. If set to false, the user will not be able to access your instance of HarperDB.
+
+### Body
+```json
+{
+  "operation": "add_user",
+  "role": "role_name",
+  "username": "hdb_user",
+  "password": "password",
+  "active": true
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "hdb_user successfully added"
+}
+```
+
+---
+
+## Alter User
+Modifies an existing user's role and/or credentials. Learn more about HarperDB users here: https://harperdb.io/docs/security/users-roles/.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `alter_user`
+* username _(required)_ - username assigned to the user. It cannot be altered after adding the user, and it serves as the user record's hash (primary key).
+* password _(optional)_ - clear text for the password. HarperDB will encrypt the password upon receipt
+* role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail
+* active _(optional)_ - status of the user's access to your HarperDB instance. See `add_user` for more detail
+
+### Body
+```json
+{
+  "operation": "alter_user",
+  "role": "role_name",
+  "username": "hdb_user",
+  "password": "password",
+  "active": true
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "updated 1 of 1 records",
+  "new_attributes": [],
+  "txn_time": 1611615114397.988,
+  "update_hashes": [
+    "hdb_user"
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Drop User
+Deletes an existing user by username. Learn more about HarperDB users here: https://harperdb.io/docs/security/users-roles/.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_user`
+* username _(required)_ - username assigned to the user
+
+### Body
+```json
+{
+  "operation": "drop_user",
+  "username": "sgoldberg"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "sgoldberg successfully deleted"
+}
+```
diff --git a/site/versioned_docs/version-4.2/developers/operations-api/utilities.md b/site/versioned_docs/version-4.2/developers/operations-api/utilities.md
new file mode 100644
index 00000000..8e8a80d5
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/operations-api/utilities.md
@@ -0,0 +1,358 @@
+---
+title: Utilities
+---
+
+# Utilities
+
+## Restart
+Restarts the HarperDB instance.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `restart`
+
+### Body
+```json
+{
+  "operation": "restart"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Restarting HarperDB. This may take up to 60 seconds."
+}
+```
+---
+
+## Restart Service
+Restarts servers for the specified HarperDB service.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `restart_service`
+* service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering`
+
+### Body
+```json
+{
+  "operation": "restart_service",
+  "service": "http_workers"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Restarting http_workers"
+}
+```
+
+---
+## System Information
+Returns detailed metrics on the host system.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `system_information`
+* attributes _(optional)_ - string array of top-level attributes desired in the response; if no value is supplied, all attributes will be returned. Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'replication']
+
+### Body
+```json
+{
+  "operation": "system_information"
+}
+```
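+
+For example, a minimal sketch of requesting only a couple of attribute groups from Node.js (the URL and credentials are placeholders):
+
+```javascript
+// Ask system_information for just the 'cpu' and 'memory' sections.
+const response = await fetch('http://localhost:9925', {
+	method: 'POST',
+	headers: {
+		'Content-Type': 'application/json',
+		Authorization: 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64'),
+	},
+	body: JSON.stringify({ operation: 'system_information', attributes: ['cpu', 'memory'] }),
+});
+console.log(await response.json());
+```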
+
+---
+
+## Delete Records Before
+
+Deletes data before the specified timestamp on the specified database table, exclusively on the node where it is executed. Any clustered nodes with replicated data will retain that data.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `delete_records_before`
+* date _(required)_ - records older than this date will be deleted. Supported format looks like: `YYYY-MM-DDThh:mm:ss.sZ`
+* schema _(required)_ - name of the schema where you are deleting your data
+* table _(required)_ - name of the table where you are deleting your data
+
+### Body
+```json
+{
+  "operation": "delete_records_before",
+  "date": "2021-01-25T23:05:27.464",
+  "schema": "dev",
+  "table": "breed"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373",
+  "job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373"
+}
+```
+
+---
+
+## Export Local
+Exports data based on a given search operation to a local file in JSON or CSV format.
+
+* operation _(required)_ - must always be `export_local`
+* format _(required)_ - the format you wish to export the data in; options are `json` & `csv`
+* path _(required)_ - path local to the server where the data will be exported
+* search_operation _(required)_ - a search operation of `search_by_hash`, `search_by_value` or `sql`
+
+### Body
+```json
+{
+  "operation": "export_local",
+  "format": "json",
+  "path": "/data/",
+  "search_operation": {
+    "operation": "sql",
+    "sql": "SELECT * FROM dev.breed"
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5"
+}
+```
+
+---
+
+## Export To S3
+Exports data based on a given search operation from a table to AWS S3 in JSON or CSV format.
+
+* operation _(required)_ - must always be `export_to_s3`
+* format _(required)_ - the format you wish to export the data in; options are `json` & `csv`
+* s3 _(required)_ - details of your access keys, bucket, bucket region and key for saving the data to S3
+* search_operation _(required)_ - a search operation of `search_by_hash`, `search_by_value` or `sql`
+
+### Body
+```json
+{
+  "operation": "export_to_s3",
+  "format": "json",
+  "s3": {
+    "aws_access_key_id": "YOUR_KEY",
+    "aws_secret_access_key": "YOUR_SECRET_KEY",
+    "bucket": "BUCKET_NAME",
+    "key": "OBJECT_NAME",
+    "region": "BUCKET_REGION"
+  },
+  "search_operation": {
+    "operation": "sql",
+    "sql": "SELECT * FROM dev.dog"
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a",
+  "job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a"
+}
+```
+
+---
+
+## Install Node Modules
+Executes npm install against specified custom function projects.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `install_node_modules`
+* projects _(required)_ - must be an array of custom function projects
+* dry_run _(optional)_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false.
+
+### Body
+```json
+{
+  "operation": "install_node_modules",
+  "projects": [
+    "dogs",
+    "cats"
+  ],
+  "dry_run": true
+}
+```
+
+---
+
+## Set Configuration
+
+Modifies the HarperDB configuration file parameters. You must follow this with a `restart` or `restart_service` operation for the changes to take effect.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_configuration` +* logging_level _(example/optional)_ - one or more configuration keywords to be updated in the HarperDB configuration file +* clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the HarperDB configuration file + +### Body +```json +{ + "operation": "set_configuration", + "logging_level": "trace", + "clustering_enabled": true +} +``` + +### Response: 200 +```json +{ + "message": "Configuration successfully set. You must restart HarperDB for new config settings to take effect." +} +``` + +--- + +## Get Configuration +Returns the HarperDB configuration parameters. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_configuration` + +### Body +```json +{ + "operation": "get_configuration" +} +``` + +### Response: 200 +```json +{ + "http": { + "compressionThreshold": 1200, + "cors": false, + "corsAccessList": [ + null + ], + "keepAliveTimeout": 30000, + "port": 9926, + "securePort": null, + "timeout": 120000 + }, + "threads": 11, + "authentication": { + "cacheTTL": 30000, + "enableSessions": true, + "operationTokenTimeout": "1d", + "refreshTokenTimeout": "30d" + }, + "analytics": { + "aggregatePeriod": 60 + }, + "clustering": { + "enabled": true, + "hubServer": { + "cluster": { + "name": "harperdb", + "network": { + "port": 12345, + "routes": null + } + }, + "leafNodes": { + "network": { + "port": 9931 + } + }, + "network": { + "port": 9930 + } + }, + "leafServer": { + "network": { + "port": 9940, + "routes": null + }, + "streams": { + "maxAge": null, + "maxBytes": null, + "maxMsgs": null, + "path": "/Users/hdb/clustering/leaf" + } + }, + "logLevel": "info", + "nodeName": "node1", + "republishMessages": false, + "databaseLevel": false, + "tls": { + "certificate": "/Users/hdb/keys/certificate.pem", + "certificateAuthority": "/Users/hdb/keys/ca.pem", + "privateKey": "/Users/hdb/keys/privateKey.pem", + "insecure": true, + "verify": true + }, + "user": "cluster_user" + }, + "componentsRoot": "/Users/hdb/components", + "localStudio": { + "enabled": false + }, + "logging": { + "auditAuthEvents": { + "logFailed": false, + "logSuccessful": false + }, + "auditLog": true, + "auditRetention": "3d", + "file": true, + "level": "error", + "root": "/Users/hdb/log", + "rotation": { + "enabled": false, + "compress": false, + "interval": null, + "maxSize": null, + "path": "/Users/hdb/log" + }, + "stdStreams": false + }, + "mqtt": { + "network": { + "port": 1883, + "securePort": 8883 + }, + "webSocket": true, + "requireAuthentication": true + }, + "operationsApi": { + "network": { + "cors": true, + "corsAccessList": [ + "*" + ], + "domainSocket": "/Users/hdb/operations-server", + "port": 9925, + "securePort": null + } + }, + "rootPath": "/Users/hdb", + "storage": { + "writeAsync": false, + "caching": true, + "compression": false, + "noReadAhead": true, + "path": "/Users/hdb/database", + "prefetchWrites": true + }, + "tls": { + "certificate": "/Users/hdb/keys/certificate.pem", + "certificateAuthority": "/Users/hdb/keys/ca.pem", + "privateKey": "/Users/hdb/keys/privateKey.pem" + } +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.2/developers/real-time.md b/site/versioned_docs/version-4.2/developers/real-time.md new file mode 100644 index 00000000..bcb84756 --- /dev/null +++ b/site/versioned_docs/version-4.2/developers/real-time.md @@ -0,0 +1,158 @@ +--- +title: Real-Time +--- 
+
+# Real-Time
+
+HarperDB provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time, as well as handle data-oriented messaging. HarperDB supports multiple standardized protocols to facilitate diverse standards-based client interaction.
+
+HarperDB real-time communication is based around database tables. Declared tables are the basis for monitoring data and for defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a HarperDB application folder](./applications/):
+```graphql
+type MyTopic @table @export
+```
+You can then subscribe to records or sub-topics in this topic/namespace, as well as save data and publish messages, with the protocols discussed below.
+
+### Content Negotiation
+
+HarperDB is a database, not a generic broker, and is therefore highly adept at handling _structured_ data. Data can be published and subscribed to in all supported structured/object formats, including JSON, CBOR, and MessagePack, and the data will be stored and handled as structured data. This means that different clients can individually choose which format they prefer, both for inbound and outbound messages. One client could publish in JSON, and another client could choose to receive messages in CBOR.
+
+## Protocols
+
+### MQTT
+
+HarperDB supports MQTT as an interface to this real-time data delivery. It is important to note that MQTT in HarperDB is not just a generic pub/sub hub, but is deeply integrated with the database, providing subscriptions directly to database records and publishing to these records. In this document we will explain how MQTT pub/sub concepts are aligned and integrated with database functionality.
+
+#### Configuration
+
+HarperDB supports MQTT with its `mqtt` server module, over standard TCP sockets or over WebSockets. This is enabled by default, but can be configured in your `harperdb-config.yaml` configuration, allowing you to change which ports it listens on, whether secure TLS connections are used, and whether MQTT is accepted over WebSockets:
+
+```yaml
+mqtt:
+  network:
+    port: 1883
+    securePort: 8883 # for TLS
+  webSocket: true # will also enable WS support through the default HTTP interface/port
+  requireAuthentication: true
+```
+
+Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`.
+
+#### Capabilities
+
+HarperDB's MQTT capabilities include support for MQTT versions v3.1 and v5, with standard publish and subscription capabilities, multi-level topics, QoS 0 and 1 levels, and durable (non-clean) sessions. HarperDB accepts the QoS 2 conversation, but doesn't guarantee exactly-once delivery (any guarantee of exactly-once delivery over unstable networks is a fictional aspiration). HarperDB's MQTT doesn't currently support last will, nor single-level wildcards (only multi-level wildcards).
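+
+As a hedged illustration, here is a minimal sketch of connecting and subscribing with the `mqtt` npm client package (the package choice, port, and credentials are assumptions for the example, not HarperDB requirements):
+
+```javascript
+const mqtt = require('mqtt');
+
+// Connect over plain TCP using the default port from the configuration above.
+const client = mqtt.connect('mqtt://localhost:1883', {
+	username: 'HDB_ADMIN', // placeholder credentials
+	password: 'password',
+	protocolVersion: 5, // or 4 for MQTT v3.1.1
+});
+
+client.on('connect', () => {
+	// Subscribe to a record topic; the current record arrives as the retained message.
+	client.subscribe('MyTopic/some-id', { qos: 1 });
+});
+
+client.on('message', (topic, payload) => {
+	console.log(topic, JSON.parse(payload.toString()));
+});
+```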
+
+### Topics
+
+In MQTT, messages are published to, and subscribed from, topics. In HarperDB, topics are aligned with resource endpoint paths in exactly the same way as the REST endpoints. If you define a table or resource in your schema with a path/endpoint of "my-resource", it can be addressed as a topic just like a URL path. So a topic of "my-resource/some-id" would correspond to the record in the my-resource table (or custom resource) with a record id of "some-id".
+
+This means that you can subscribe to "my-resource/some-id", and this subscription means you will receive notification messages for any updates to this record. If this record is modified or deleted, a message will be sent to listeners of this subscription.
+
+The current value of this record is also treated as the "retained" message for this topic. When you subscribe to "my-resource/some-id", you will immediately receive the record for this id, through a "publish" command from the server, as the initial "retained" message that is first delivered. This provides a simple and effective way to get the current state of a record and future updates to that record, without having to worry about the timing issues of aligning a retrieval and a subscription separately.
+
+Similarly, publishing a message to a "topic" also interacts with the database. Publishing a message with the "retain" flag enabled is interpreted as an update or put to that record. The published message will replace the current record with the contents of the published message.
+
+If a message is published without a `retain` flag, the message will not alter the record at all, but will still be published to any subscribers to that record.
+
+HarperDB supports QoS 0 and 1 for publishing and subscribing.
+
+HarperDB supports multi-level topics, both for subscribing and publishing. HarperDB also supports multi-level wildcards, so you can subscribe to `my-resource/#` to receive notifications for `my-resource/some-id` as well as `my-resource/nested/id`, or you can subscribe to `my-resource/nested/#` and receive the latter, but not the former, topic messages. HarperDB currently only supports trailing multi-level wildcards (no single-level '+' wildcards).
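+
+For instance, a minimal sketch of publishing with and without the retain flag, again assuming the `mqtt` npm client from the sketch above:
+
+```javascript
+const mqtt = require('mqtt');
+const client = mqtt.connect('mqtt://localhost:1883', { username: 'HDB_ADMIN', password: 'password' });
+
+client.on('connect', () => {
+	// Retained publish: interpreted as a put, replacing the record for this id.
+	client.publish('MyTopic/some-id', JSON.stringify({ temperature: 22.5 }), { qos: 1, retain: true });
+	// Non-retained publish: delivered to subscribers, but the record is unchanged.
+	client.publish('MyTopic/some-id', JSON.stringify({ note: 'transient message' }), { qos: 1 });
+});
+```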
+
+### Ordering
+
+HarperDB is designed to be a distributed database, and an intrinsic characteristic of distributed servers is that messages may take different amounts of time to traverse the network and may arrive in a different order depending on server location and network topology. HarperDB is designed for distributed data with minimal latency, so messages are delivered to subscribers immediately when they arrive. HarperDB does not delay messages to coordinate confirmation or consensus among other nodes, which would significantly increase latency; messages are delivered as quickly as possible.
+
+As an example, let's consider message #1, which is published to node A, which then sends the message to node B and node C, but the message takes a while to get there. Slightly later, while the first message is still in transit, message #2 is published to node B, which then replicates it to A and C, and because of network conditions, message #2 arrives at node C before message #1. Because HarperDB prioritizes low latency, when node C receives message #2, it immediately publishes it to all its local subscribers (it has no knowledge that message #1 is in transit).
+
+When message #1 is received by node C, what it does with this message depends on whether the message is a "retained" message (was published with a retain flag set to true, or was put/update/upsert/inserted into the database) or a non-retained message. In the case of a non-retained message, the message will be delivered to all local subscribers (even though it had been published earlier), thereby prioritizing the delivery of every message. On the other hand, for a retained message, HarperDB will not deliver the earlier out-of-order message to clients, and will keep the message with the latest timestamp as the "winning" record state (which will be the retained message for any subsequent subscriptions). Retained messages maintain (eventual) consistency across the entire cluster of servers: all nodes will converge on the same message as being the latest, retained message (#2 in this case).
+
+Non-retained messages are generally a good choice for applications like chat, where every message needs to be delivered even if messages might arrive out of order (the order may not be consistent across all servers). Retained messages can be thought of as "superseding" messages, and are a good fit for applications like instrument measurements (temperature readings, for example), where the priority is to provide the _latest_ reading, older readings are not worth publishing after a new one arrives, and consistency of the most-recent record across the network is important.
+
+### WebSockets
+
+WebSockets are supported through the REST interface and go through the `connect(incomingMessages)` method on resources. By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection to `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' with the resource id of 341 and connect to it. On the web platform this could be:
+
+```javascript
+let ws = new WebSocket('wss://server/my-resource/341');
+ws.onmessage = (event) => {
+	// received a notification from the server
+	let data = JSON.parse(event.data);
+};
+```
+
+By default, the resource will make a subscription to that resource, monitoring any changes to the records or messages published to it, and will return events on the WebSockets connection. You can also override `connect(incomingMessages)` with your own handler. The `connect` method simply needs to return an iterable (asynchronous iterable) that represents the stream of messages to be sent to the client. One easy way to create an iterable stream is to define the `connect` method as a generator and `yield` messages as they become available. For example, a simple WebSockets echo server for a resource could be written:
+
+```javascript
+export class Echo extends Resource {
+	async *connect(incomingMessages) {
+		for await (let message of incomingMessages) { // wait for each incoming message from the client
+			// and send the message back to the client
+			yield message;
+		}
+	}
+}
+```
+
+You can also call the default `connect` and it will provide a convenient streaming iterable with events for the outgoing messages, with a `send` method that you can call to send messages on the iterable, and a `close` event for determining when the connection is closed.
+The incoming messages iterable is also an event emitter, and you can listen for `data` events to get the incoming messages using event style:
+
+```javascript
+export class Example extends Resource {
+	connect(incomingMessages) {
+		let outgoingMessages = super.connect();
+		let timer = setInterval(() => {
+			outgoingMessages.send({greeting: 'hi again!'});
+		}, 1000); // send a message once a second
+		incomingMessages.on('data', (message) => {
+			// another way of echo-ing the data back to the client
+			outgoingMessages.send(message);
+		});
+		outgoingMessages.on('close', () => {
+			// make sure we end the timer once the connection is closed
+			clearInterval(timer);
+		});
+		return outgoingMessages;
+	}
+}
+```
+
+### Server-Sent Events
+
+Server-Sent Events (SSE) are also supported through the REST server interface, and provide a simple and efficient mechanism for web-based applications to receive real-time updates. For consistency of push delivery, SSE connections go through the `connect()` method on resources, much like WebSockets. The primary difference is that `connect` is called without any `incomingMessages` argument, since SSE is a one-directional transport mechanism. This can be used much like WebSockets: specifying a resource URL path will connect to that resource, and by default provides a stream of messages for changes and messages for that resource. For example, you can connect to receive notifications in a browser for a resource like:
+
+```javascript
+let eventSource = new EventSource('https://server/my-resource/341', { withCredentials: true });
+eventSource.onmessage = (event) => {
+	// received a notification from the server
+	let data = JSON.parse(event.data);
+};
+```
+
+### MQTT Feature Support Matrix
+
+| Feature | Support |
+| ------- | ------- |
+| Connections, protocol negotiation, and acknowledgement with v3.1.1 | :heavy_check_mark: |
+| Connections, protocol negotiation, and acknowledgement with v5 | :heavy_check_mark: |
+| Secure MQTTS | :heavy_check_mark: |
+| MQTTS over WebSockets | :heavy_check_mark: |
+| MQTT authentication via user/pass | :heavy_check_mark: |
+| MQTT authentication via mTLS | :heavy_check_mark: |
+| Publish | :heavy_check_mark: |
+| Subscribe | :heavy_check_mark: |
+| Multi-level wildcard | :heavy_check_mark: |
+| Single-level wildcard | :heavy_check_mark: |
+| QoS 0 | :heavy_check_mark: |
+| QoS 1 | :heavy_check_mark: |
+| QoS 2 | Not fully supported; can perform the conversation, but does not guarantee exactly-once persistence |
+| Clean session | :heavy_check_mark: |
+| Durable session | :heavy_check_mark: |
+| Distributed durable session | |
+| Will | :heavy_check_mark: |
+| MQTT V5 User properties | |
+| MQTT V5 Will properties | |
+| MQTT V5 Connection properties | |
+| MQTT V5 Connection acknowledgement properties | |
+| MQTT V5 Publish properties | |
+| MQTT V5 Subscribe properties | |
+| MQTT V5 Ack properties | |
+| MQTT V5 AUTH command | |
+| MQTT V5 Shared Subscriptions | |
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/developers/rest.md b/site/versioned_docs/version-4.2/developers/rest.md
new file mode 100644
index 00000000..6b44783f
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/rest.md
@@ -0,0 +1,201 @@
+---
+title: REST
+---
+
+# REST
+
+HarperDB provides a powerful, efficient, and standard-compliant HTTP REST interface for interacting with tables and other resources.
+The REST interface is the recommended interface for data access, querying, and manipulation (for HTTP interactions), providing the best performance and HTTP interoperability with different clients.
+
+Resources, including tables, can be configured as RESTful endpoints. The name of the table or the [exported](./applications/defining-schemas#export) name of the resource defines the beginning of the endpoint path. From there, a record id or query can be appended. Following uniform interface principles, HTTP methods define different actions with resources. For each method, this describes the default action.
+
+The default path structure provides access to resources at several different levels:
+
+* `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table).
+* `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records.
+* `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records.
+* `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix.
+* `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments.
+
+## GET
+
+These can be used to retrieve individual records or perform searches. This is handled by the Resource method `get()` (and can be overridden).
+
+### `GET /my-resource/record-id`
+
+This can be used to retrieve a record by its primary key. The response will include the record as the body.
+
+#### Caching/Conditional Requests
+
+A `GET` response for a record will include an `ETag` response header containing an encoded version (a timestamp of the last modification) of this record (or of any accessed record when used in a custom get method). On subsequent requests, a client (that has a cached copy) may include an `If-None-Match` request header with this tag. If the record has not been updated since then, the response will have a 304 status and no body. This facilitates significant performance gains, since the response data doesn't need to be serialized and transferred over the network.
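+
+As a hedged sketch, a conditional request from JavaScript might look like the following (the host, port, and path are placeholders for your instance):
+
+```javascript
+// First request: capture the ETag along with the record.
+let response = await fetch('http://localhost:9926/my-resource/some-id');
+const etag = response.headers.get('ETag');
+const record = await response.json();
+
+// Revalidation: send the tag back; a 304 means the cached copy is still current.
+response = await fetch('http://localhost:9926/my-resource/some-id', {
+	headers: { 'If-None-Match': etag },
+});
+if (response.status === 304) {
+	console.log('cached copy is still fresh', record);
+}
+```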
+
+### `GET /my-resource/?property=value`
+
+This can be used to search for records by the specified property name and value. See the querying section for more information.
+
+### `GET /my-resource/record-id.property`
+
+This can be used to retrieve the specified property of the specified record.
+
+## PUT
+
+This can be used to create or update a record with the provided object/data (similar to an "upsert") with a specified key. This is handled by the Resource method `put(record)`.
+
+### `PUT /my-resource/record-id`
+
+This will create or update the record with the URL path that maps to the record's primary key. The record will be replaced with the contents of the data in the request body. The new record will exactly match the data that was sent (this will remove any properties that were present in the previous record and not included in the body). Future GETs will return the exact data that was provided by PUT (what you PUT is what you GET). For example:
+
+```http
+PUT /MyTable/123
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create or replace the record with a primary key of "123" with the object defined by the JSON in the body. This is handled by the Resource method `put()`.
+
+## DELETE
+
+This can be used to delete a record or records.
+
+### `DELETE /my-resource/record-id`
+
+This will delete a record with the given primary key. This is handled by the Resource's `delete` method. For example:
+
+```http
+DELETE /MyTable/123
+```
+
+This will delete the record with the primary key of "123".
+
+### `DELETE /my-resource/?property=value`
+
+This will delete all the records that match the provided query.
+
+## POST
+
+Generally the POST method can be used for custom actions, since POST has the broadest semantics. For tables that are exposed as endpoints, this can also be used to create new records.
+
+### `POST /my-resource/`
+
+This is handled by the Resource method `post(data)`, which is a good method to extend to make various other types of modifications. Also, with a table you can create a new record without specifying a primary key, for example:
+
+```http
+POST /MyTable/
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create a new record, auto-assigning a primary key, which will be returned in the `Location` header.
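+
+As an illustration, a minimal sketch of creating a record from JavaScript and reading back the assigned key (host and port are placeholders for your instance):
+
+```javascript
+// Create a record without specifying a primary key.
+const response = await fetch('http://localhost:9926/MyTable/', {
+	method: 'POST',
+	headers: { 'Content-Type': 'application/json' },
+	body: JSON.stringify({ name: 'some data' }),
+});
+
+// The auto-assigned primary key comes back in the Location header.
+console.log(response.headers.get('Location'));
+```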
+
+## Querying through URL query parameters
+
+URL query parameters provide a powerful language for specifying database queries in HarperDB. This can be used to search by a single property name and value, to find all records with the given value for that property/attribute. It is important to note that this property must be configured to be indexed in order to search on it. For example:
+
+```http
+GET /my-resource/?property=value
+```
+
+We can specify multiple properties that must match:
+
+```http
+GET /my-resource/?property=value&property2=another-value
+```
+
+Note that only one of the properties needs to be indexed for this query to execute.
+
+We can also specify different comparators, such as less-than and greater-than queries, using [FIQL](https://datatracker.ietf.org/doc/html/draft-nottingham-atompub-fiql-00) syntax. If we want to specify records with an `age` value greater than 20:
+
+```http
+GET /my-resource/?age=gt=20
+```
+
+Or less than or equal to 20:
+
+```http
+GET /my-resource/?age=le=20
+```
+
+The comparison operators include `lt` (less than), `le` (less than or equal), `gt` (greater than), `ge` (greater than or equal), and `ne` (not equal). These comparison operators can also be combined with other query parameters with `&`. For example, if we wanted products with a category of software and a price between 100 and 200, we could write:
+
+```http
+GET /product/?category=software&price=gt=100&price=lt=200
+```
+
+HarperDB has several special query functions that use "call" syntax. These can be included in the query string as their own query entries (separated from other query conditions with an `&`). These include:
+
+### `select(properties)`
+
+This allows you to specify which properties should be included in the responses. This takes several forms:
+
+* `?select(property)`: This will return the values of the specified property directly in the response (they will not be put in an object).
+* `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties.
+* `?select([property1,property2,...])`: This returns the records as arrays of the property values in the specified properties.
+* `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property.
+
+To get a list of product names with a category of software:
+
+```http
+GET /product/?category=software&select(name)
+```
+
+### `limit(start,end)` or `limit(end)`
+
+Specifies a limit on the number of records returned, optionally providing a starting offset.
+
+For example, to find the first twenty records with a `rating` greater than 3, `inStock` equal to true, only returning the `rating` and `name` properties, you could use:
+
+```http
+GET /product?rating=gt=3&inStock=true&select(rating,name)&limit(20)
+```
+
+### Content Types and Negotiation
+
+HTTP defines a couple of headers for indicating the (preferred) content type of the request and response. The `Content-Type` request header can be used to specify the content type of the request body (for PUT, PATCH, and POST). The `Accept` request header indicates the preferred content type of the response. For general records with object structures, HarperDB supports the following content types:
+
+* `application/json` - Common format, easy to read, with great tooling support.
+* `application/cbor` - Recommended binary format for optimal encoding efficiency and performance.
+* `application/x-msgpack` - Also an efficient format, but CBOR is preferable, as it has better streaming capabilities and faster time-to-first-byte.
+* `text/csv` - CSV lacks explicit typing and is not well suited for heterogeneous data structures, but is good for moving data to and from a spreadsheet.
+
+CBOR is generally the most efficient and powerful encoding format, with the best performance, most compact encoding, and most expansive ability to encode different data types like Dates, Maps, and Sets. MessagePack is very similar and tends to have broader adoption. However, JSON can be easier to work with and may have better tooling. Also, if you are using compression for data transfer (gzip or brotli), JSON will often result in more compact compressed data due to character frequencies that better align with Huffman coding, making JSON a good choice for web applications that do not require specific data types beyond the standard JSON types.
+
+Requesting a specific content type can also be done in a URL by suffixing the path with the extension for the content type. If you want to retrieve a record in CSV format, you could request:
+
+```http
+GET /product/some-id.csv
+```
+
+Or you could request a query response in MessagePack:
+
+```http
+GET /product/.msgpack?category=software
+```
+
+However, it is generally not recommended that you use extensions in paths; it is best practice to use the `Accept` header to specify acceptable content types.
+
+### Specific Content Objects
+
+You can specify other content types, and the data will be stored as a record or object that holds the type and contents of the data. For example, if you do:
+
+```
+PUT /my-resource/33
+Content-Type: text/calendar
+
+BEGIN:VCALENDAR
+VERSION:2.0
+...
+```
+
+This would store a record equivalent to the JSON:
+
+```
+{ "contentType": "text/calendar", "data": "BEGIN:VCALENDAR\nVERSION:2.0\n..." }
+```
+
+Retrieving a record with `contentType` and `data` properties will likewise return a response with the specified `Content-Type` and body. If the `Content-Type` is not of the `text` family, the data will be treated as binary data (a Node.js `Buffer`).
+
+You can also use `application/octet-stream` to indicate that the request body should be preserved in binary form.
+This is also useful for uploading to a specific property:
+
+```
+PUT /my-resource/33/image
+Content-Type: image/gif
+
+...image data...
+```
diff --git a/site/versioned_docs/version-4.2/developers/security/basic-auth.md b/site/versioned_docs/version-4.2/developers/security/basic-auth.md
new file mode 100644
index 00000000..00ab8b6d
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/security/basic-auth.md
@@ -0,0 +1,62 @@
+---
+title: Basic Authentication
+---
+
+# Basic Authentication
+
+HarperDB uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request.
+
+_**You do not need to log in separately. Basic Auth is added to each HTTP request like create\_schema, create\_table, insert, etc. via headers.**_
+
+A header is added to each HTTP request. The header key is **"Authorization"**; the header value is **"Basic <<your username and password buffer token>>"**.
+
+## Authentication in HarperDB Studio
+
+In the code sample below, you can see where we add the authorization header to the request. This needs to be added for each and every HTTP request for HarperDB.
+
+_Note: This function uses btoa. Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._
+
+```javascript
+const http = require('http');
+
+// Helper to check whether a response body parses as JSON.
+function isJson(body) {
+	try {
+		JSON.parse(body);
+		return true;
+	} catch {
+		return false;
+	}
+}
+
+function callHarperDB(call_object, operation, callback) {
+	const options = {
+		"method": "POST",
+		"hostname": call_object.endpoint_url,
+		"port": call_object.endpoint_port,
+		"path": "/",
+		"headers": {
+			"content-type": "application/json",
+			// Basic auth: base64-encode "username:password" and prefix with "Basic "
+			"authorization": "Basic " + btoa(call_object.username + ':' + call_object.password),
+			"cache-control": "no-cache"
+		}
+	};
+
+	const http_req = http.request(options, function (hdb_res) {
+		let chunks = [];
+
+		hdb_res.on("data", function (chunk) {
+			chunks.push(chunk);
+		});
+
+		hdb_res.on("end", function () {
+			const body = Buffer.concat(chunks);
+			if (isJson(body)) {
+				return callback(null, JSON.parse(body));
+			} else {
+				return callback(body, null);
+			}
+		});
+	});
+
+	http_req.on("error", function (chunk) {
+		return callback("Failed to connect", null);
+	});
+
+	http_req.write(JSON.stringify(operation));
+	http_req.end();
+}
+```
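+
+On newer Node.js versions, a hedged equivalent using the built-in `fetch` and `Buffer` looks like this (the endpoint, credentials, and operation are placeholders):
+
+```javascript
+// Build the Basic auth header value from "username:password".
+const auth = 'Basic ' + Buffer.from('username:password').toString('base64');
+
+const response = await fetch('http://localhost:9925', {
+	method: 'POST',
+	headers: { 'Content-Type': 'application/json', Authorization: auth },
+	body: JSON.stringify({ operation: 'user_info' }),
+});
+console.log(await response.json());
+```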
diff --git a/site/versioned_docs/version-4.2/developers/security/certificate-management.md b/site/versioned_docs/version-4.2/developers/security/certificate-management.md
new file mode 100644
index 00000000..eb69df74
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/security/certificate-management.md
@@ -0,0 +1,62 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+This document covers managing certificates for HarperDB's external-facing APIs. For information on certificate management for clustering, see [clustering certificate management](../clustering/certificate-management).
+
+## Development
+
+An out-of-the-box install of HarperDB does not have HTTPS enabled (see [configuration](../../deployments/configuration) for the relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS.
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart HarperDB.
+
+By default, HarperDB will generate certificates and place them at `<ROOTPATH>/keys/`. These certificates will not have a valid Common Name (CN) for your HarperDB node, so you will be able to use HTTPS, but your HTTPS client must be configured to accept the invalid certificate.
+
+## Production
+
+For production deployments, in addition to using HTTPS, we recommend using your own certificate authority (CA), or a public CA such as Let's Encrypt, to generate certificates with CNs that match the Fully Qualified Domain Name (FQDN) of your HarperDB node.
+
+We have a few recommended options for enabling HTTPS in a production setting.
+
+### Option: Enable HarperDB HTTPS and Replace Certificates
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart HarperDB.
+
+To replace the certificates, either replace the contents of the existing certificate files at `<ROOTPATH>/keys/`, or update the HarperDB configuration with the path of your new certificate files, and then restart HarperDB.
+
+```yaml
+tls:
+  certificate: ~/hdb/keys/certificate.pem
+  certificateAuthority: ~/hdb/keys/ca.pem
+  privateKey: ~/hdb/keys/privateKey.pem
+```
+
+The `operationsApi.tls` configuration is optional. If it is not set, HarperDB will default to the values in the `tls` section.
+
+```yaml
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+```
+
+### Option: Nginx Reverse Proxy
+
+Instead of enabling HTTPS for HarperDB, Nginx can be used as a reverse proxy for HarperDB.
+
+Install Nginx, configure Nginx to use certificates issued from your own CA or a public CA, then configure Nginx to listen for HTTPS requests and forward them to HarperDB as HTTP requests.
+
+[Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let's Encrypt certificates used by Nginx.
+
+### Option: External Reverse Proxy
+
+Instead of enabling HTTPS for HarperDB, a number of different external services can be used as a reverse proxy for HarperDB. These services typically have integrated certificate management. Configure the service to listen for HTTPS requests and forward them (over a private network) to HarperDB as HTTP requests.
+
+Examples of these types of services include an AWS Application Load Balancer or a GCP external HTTP(S) load balancer.
+
+### Additional Considerations
+
+It is possible to use different certificates for the Operations API and the Custom Functions API. In scenarios where only your Custom Functions endpoints need to be exposed to the Internet and the Operations API is reserved for HarperDB administration, you may want to use a private CA to issue certificates for the Operations API and a public CA for the Custom Functions API certificates.
diff --git a/site/versioned_docs/version-4.2/developers/security/configuration.md b/site/versioned_docs/version-4.2/developers/security/configuration.md
new file mode 100644
index 00000000..67d959fd
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/security/configuration.md
@@ -0,0 +1,39 @@
+---
+title: Configuration
+---
+
+# Configuration
+
+HarperDB was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with HarperDB.
+
+## CORS
+
+HarperDB allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS).
+By default, HarperDB enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following:
+
+1. Open the harperdb-config.yaml file, which can be found in `<ROOTPATH>`, the location you specified during install.
+1. In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: `cors` and `corsAccessList`.
+    * `cors`
+        1. To turn off, change to: `cors: false`
+        1. To turn on, change to: `cors: true`
+    * `corsAccessList`
+        1. The `corsAccessList` will only be recognized by the system when `cors` is `true`
+        1. To create an access list, set `corsAccessList` to a comma-separated list of domains.
+
+            i.e. `corsAccessList` is `http://harperdb.io,http://products.harperdb.io`
+        1. To clear out the access list and allow all domains: `corsAccessList` is `[null]`
+
+## SSL
+
+HarperDB provides the option to use an HTTP or an HTTPS and HTTP/2 interface. The default port for the server is 9925.
+
+This default port can be changed by updating the `operationsApi.network.port` value in `<ROOTPATH>/harperdb-config.yaml`.
+
+By default, HTTPS is turned off and HTTP is turned on. It is recommended that you never directly expose HarperDB's HTTP interface through a publicly available port. HTTP is intended for local or private network use.
+
+You can toggle HTTPS and HTTP in the settings file by setting `operationsApi.network.https` to true/false. When `https` is set to `false`, the server will use HTTP (version 1.1). Enabling HTTPS will enable both HTTP/1.1 and HTTP/2 over TLS.
+
+HarperDB automatically generates a certificate (certificate.pem), a certificate authority (ca.pem) and a private key file (privateKey.pem), which live at `<ROOTPATH>/keys/`.
+
+You can replace these with your own certificates and key.
+
+**Changes to these settings require a restart. Use the `restart` operation from the HarperDB Operations API.**
diff --git a/site/versioned_docs/version-4.2/developers/security/index.md b/site/versioned_docs/version-4.2/developers/security/index.md
new file mode 100644
index 00000000..cc5dcfc2
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/security/index.md
@@ -0,0 +1,12 @@
+---
+title: Security
+---
+
+# Security
+
+HarperDB uses role-based, attribute-level security to ensure that users can only gain access to the data they're supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases.
+
+* [JWT Authentication](./jwt-auth)
+* [Basic Authentication](./basic-auth)
+* [Configuration](./configuration)
+* [Users and Roles](./users-and-roles)
diff --git a/site/versioned_docs/version-4.2/developers/security/jwt-auth.md b/site/versioned_docs/version-4.2/developers/security/jwt-auth.md
new file mode 100644
index 00000000..f48fe0ee
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/security/jwt-auth.md
@@ -0,0 +1,96 @@
+---
+title: JWT Authentication
+---
+
+# JWT Authentication
+
+HarperDB uses token-based authentication with JSON Web Tokens (JWTs).
+
+This consists of two primary operations, `create_authentication_tokens` and `refresh_operation_token`. These generate two types of tokens, as follows:
+
+* The `operation_token`, which is used to authenticate all HarperDB operations in the Bearer Token Authorization header. The default expiry is one day.
+* The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization header for the `refresh_operation_token` operation only. The default expiry is thirty days.
+
+The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost.
+
+## Create Authentication Tokens
+
+Users must initially create tokens using their HarperDB credentials. The following POST body is sent to HarperDB. No headers are required for this POST operation.
+
+```json
+{
+  "operation": "create_authentication_tokens",
+  "username": "username",
+  "password": "password"
+}
+```
+
+A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+    "operation": "create_authentication_tokens",
+    "username": "username",
+    "password": "password"
+}'
+```
+
+An example expected return object is:
+
+```json
+{
+  "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4",
+  "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60"
+}
+```
+
+## Using JWT Authentication Tokens
+
+The `operation_token` value is used to authenticate all operations in place of our standard Basic auth.
+To pass the token, you will need to create a Bearer Token Authorization header, like the following request:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \
+--data-raw '{
+    "operation":"search_by_hash",
+    "schema":"dev",
+    "table":"dog",
+    "hash_values":[1],
+    "get_attributes": ["*"]
+}'
+```
+
+## Token Expiration
+
+The `operation_token` expires at a set interval. Once it expires, it will no longer be accepted by HarperDB. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization header. A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \
+--data-raw '{
+    "operation":"refresh_operation_token"
+}'
+```
+
+This will return a new `operation_token`.
+An example expected return object is:
+
+```json
+{
+  "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ5NzgxODkxNTEsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDk3ODE4OTE1MSwiYWN0aXZlIjp0cnVlLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDk0NDE1MTM0NywiX191cGRhdGVkdGltZV9fIjoxNjA0OTQ0MTUxMzQ3LCJpZCI6IjdiNDNlNzM1LTkzYzctNDQzYi05NGY3LWQwMzY3Njg5NDc4YSIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6InVzZXJuYW1lIn0sImlhdCI6MTYwNDk3ODcxMywiZXhwIjoxNjA1MDY1MTEzLCJzdWIiOiJvcGVyYXRpb24ifQ.qB4FS7fzryCO5epQlFCQe4mQcUEhzXjfsXRFPgauXrGZwSeSr2o2a1tE1xjiI3qjK0r3f2bdi2xpFlDR1thdY-m0mOpHTICNOae4KdKzp7cyzRaOFurQnVYmkWjuV_Ww4PJgr6P3XDgXs5_B2d7ZVBR-BaAimYhVRIIShfpWk-4iN1XDk96TwloCkYx01BuN87o-VOvAnOG-K_EISA9RuEBpSkfUEuvHx8IU4VgfywdbhNMh6WXM0VP7ZzSpshgsS07MGjysGtZHNTVExEvFh14lyfjfqKjDoIJbo2msQwD2FvrTTb0iaQry1-Wwz9QJjVAUtid7tJuP8aBeNqvKyMIXRVnl5viFUr-Gs-Zl_WtyVvKlYWw0_rUn3ucmurK8tTy6iHyJ6XdUf4pYQebpEkIvi2rd__e_Z60V84MPvIYs6F_8CAy78aaYmUg5pihUEehIvGRj1RUZgdfaXElw90-m-M5hMOTI04LrzzVnBu7DcMYg4UC1W-WDrrj4zUq7y8_LczDA-yBC2-bkvWwLVtHLgV5yIEuIx2zAN74RQ4eCy1ffWDrVxYJBau4yiIyCc68dsatwHHH6bMK0uI9ib6Y9lsxCYjh-7MFcbP-4UBhgoDDXN9xoUToDLRqR9FTHqAHrGHp7BCdF5d6TQTVL5fmmg61MrLucOo-LZBXs1NY"
+}
+```
+
+The `refresh_token` also expires at a set interval, but a longer one. Once it expires, it will no longer be accepted by HarperDB. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called.
+
+## Configuration
+
+Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration) with the following parameters:
+
+* `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation\_token expires (default 1d).
+* `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh\_token expires (default 30d).
+
+A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
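+
+Putting these pieces together, a minimal sketch of the full token lifecycle from Node.js (the URL and credentials are placeholders):
+
+```javascript
+const url = 'http://localhost:9925';
+
+// 1. Create both tokens with user credentials.
+let res = await fetch(url, {
+	method: 'POST',
+	headers: { 'Content-Type': 'application/json' },
+	body: JSON.stringify({ operation: 'create_authentication_tokens', username: 'username', password: 'password' }),
+});
+let { operation_token, refresh_token } = await res.json();
+
+// 2. Use the operation_token as a Bearer token for normal operations.
+res = await fetch(url, {
+	method: 'POST',
+	headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${operation_token}` },
+	body: JSON.stringify({ operation: 'user_info' }),
+});
+console.log(await res.json());
+
+// 3. When the operation_token expires, trade the refresh_token for a new one.
+res = await fetch(url, {
+	method: 'POST',
+	headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${refresh_token}` },
+	body: JSON.stringify({ operation: 'refresh_operation_token' }),
+});
+({ operation_token } = await res.json());
+```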
diff --git a/site/versioned_docs/version-4.2/developers/security/users-and-roles.md b/site/versioned_docs/version-4.2/developers/security/users-and-roles.md
new file mode 100644
index 00000000..6c9fbfb5
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/security/users-and-roles.md
@@ -0,0 +1,267 @@
+---
+title: Users & Roles
+---
+
+# Users & Roles
+
+HarperDB utilizes a Role-Based Access Control (RBAC) framework to manage access to HarperDB instances. A user is assigned a role that determines the user's permissions to access database resources and run core operations.
+
+## Roles in HarperDB
+
+Role permissions in HarperDB are broken into two categories – permissions around database manipulation and permissions around database definition.
+
+**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a HarperDB instance.
+
+1. At the table level, access permissions must be explicitly defined when adding or altering a role – _i.e. HarperDB will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations._
+1. At the attribute level, permissions for attributes in all tables included in the permissions set will be assigned based on the specific attribute-level permissions defined in the table's permission set or, if there are no attribute-level permissions defined, on the table's CRUD set.
+
+**Database Definition**: Permissions related to managing schemas, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role.
+
+**Built-In Roles**
+
+There are three built-in roles within HarperDB. See the full breakdown of operations restricted to super\_user roles only [here](./users-and-roles#role-based-operation-restrictions).
+
+* `super_user` - This role provides full access to all operations and methods within a HarperDB instance; this can be considered the admin role.
+    * This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
+* `cluster_user` - This role is an internal system role type that is managed internally to allow clustered instances to communicate with one another.
+    * This role is an internally managed role to facilitate communication between clustered instances.
+* `structure_user` - This role provides specific access for the creation and deletion of data.
+    * When defining this role type, you can either assign a value of true, which allows the role to create and drop schemas & tables, or assign a string array. The values in this array are schema names, and they restrict the role to creating and dropping tables in the designated schemas only.
+
+**User-Defined Roles**
+
+In addition to built-in roles, admins (i.e. users assigned to the super\_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.
+
+* Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
+* Describe operations will return metadata for all schemas, tables, and attributes that a user-defined role has CRUD permissions for.
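+
+For example, a hedged sketch of adding a role that can only manage table structure within the `dev` schema (the role name here is illustrative, not prescribed):
+
+```json
+{
+  "operation": "add_role",
+  "role": "dev_structure_manager",
+  "permission": {
+    "super_user": false,
+    "structure_user": ["dev"]
+  }
+}
+```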
+
+**Role Permissions**
+
+When creating a new, user-defined role in a HarperDB instance, you must provide a role name and the permissions to assign to that role. _Reminder, only super users can create and manage roles._
+
+* `role` name used to easily identify the role assigned to individual users.
+
+  _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation._
+* `permissions` used to explicitly define CRUD access to existing table data.
+
+Example JSON for `add_role` request
+
+```json
+{
+	"operation":"add_role",
+	"role":"software_developer",
+	"permission":{
+		"super_user":false,
+		"schema_name":{
+			"tables": {
+				"table_name1": {
+					"read":true,
+					"insert":true,
+					"update":true,
+					"delete":false,
+					"attribute_permissions":[
+						{
+							"attribute_name":"attribute1",
+							"read":true,
+							"insert":true,
+							"update":true
+						}
+					]
+				},
+				"table_name2": {
+					"read":true,
+					"insert":true,
+					"update":true,
+					"delete":false,
+					"attribute_permissions":[]
+				}
+			}
+		}
+	}
+}
+```
+
+**Setting Role Permissions**
+
+There are two parts to a permissions set:
+
+* `super_user` – boolean value indicating if the role should be provided super\_user access.
+
+  _If `super_user` is set to true, there should be no additional schema-specific permissions values included, since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within HarperDB but ignored, as super\_users have full access to the database._
+* `permissions`: Schema tables that a role should have specific CRUD access to should be included in the final, schema-specific `permissions` JSON.
+
+  _For user-defined roles (i.e. non-super\_user roles), blank permissions will result in the user being restricted from accessing any of the database schema._
+
+**Table Permissions JSON**
+
+Each table that a role should be given some level of CRUD permissions to must be included in the `tables` array for its schema in the role's permissions JSON passed to the API (_see example above_).
+
+```json
+{
+	"table_name": { // the name of the table to define CRUD perms for
+		"read": boolean, // access to read from this table
+		"insert": boolean, // access to insert data to table
+		"update": boolean, // access to update data in table
+		"delete": boolean, // access to delete row data in table
+		"attribute_permissions": [ // permissions for specific table attributes
+			{
+				"attribute_name": "attribute_name", // attribute to assign permissions to
+				"read": boolean, // access to read this attribute from table
+				"insert": boolean, // access to insert this attribute into the table
+				"update": boolean // access to update this attribute in the table
+			}
+		]
+	}
+}
+```
+
+**Important Notes About Table Permissions**
+
+1. If a schema and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to that schema and/or those tables.
+1. If a table-level CRUD permission is set to false, any attribute-level permission with that same CRUD permission set to true will return an error.
+
+**Important Notes About Attribute Permissions**
+
+1. If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array.
+1. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.
+
+   _See table\_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute\_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._
+1. If attribute-level permissions are set – _i.e. attribute\_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).
+
+   _See table\_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert and update ‘attribute1’ and the table’s hash attribute, but no other attributes on that table._
+1. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.
+
+   _See table\_name2’s permission set for an example of this._
+1. The `__createdtime__` and `__updatedtime__` attributes that HarperDB manages internally can have read permissions set, but all other attribute-level permissions set on them will be ignored.
+1. Please note that DELETE permissions are not included as part of an individual attribute-level permission set. This is because it is not possible to delete individual attributes from a row; rows must be deleted in full.
+   * If a role needs the ability to delete rows from a table, that permission should be set at the table level.
+   * The practical approach to deleting an individual attribute of a row is to set that attribute to null via an update statement.
+
+## Role-Based Operation Restrictions
+
+The table below includes all API operations available in HarperDB and indicates whether or not the operation is restricted to super\_user roles.
+ +_Keep in mind that non-super\_user roles will also be restricted within the operations they do have access to by the schema-level CRUD permissions set for the roles._ + +| Schemas and Tables | Restricted to Super\_Users | +| ------------------ | :------------------------: | +| describe\_all | | +| describe\_schema | | +| describe\_table | | +| create\_schema | X | +| drop\_schema | X | +| create\_table | X | +| drop\_table | X | +| create\_attribute | | +| drop\_attribute | X | + +| NoSQL Operations | Restricted to Super\_Users | +| ---------------------- | :------------------------: | +| insert | | +| update | | +| upsert | | +| delete | | +| search\_by\_hash | | +| search\_by\_value | | +| search\_by\_conditions | | + +| SQL Operations | Restricted to Super\_Users | +| -------------- | :------------------------: | +| select | | +| insert | | +| update | | +| delete | | + +| Bulk Operations | Restricted to Super\_Users | +| ---------------- | :------------------------: | +| csv\_data\_load | | +| csv\_file\_load | | +| csv\_url\_load | | +| import\_from\_s3 | | + +| Users and Roles | Restricted to Super\_Users | +| --------------- | :------------------------: | +| list\_roles | X | +| add\_role | X | +| alter\_role | X | +| drop\_role | X | +| list\_users | X | +| user\_info | | +| add\_user | X | +| alter\_user | X | +| drop\_user | X | + +| Clustering | Restricted to Super\_Users | +| ----------------------- | :------------------------: | +| cluster\_set\_routes | X | +| cluster\_get\_routes | X | +| cluster\_delete\_routes | X | +| add\_node | X | +| update\_node | X | +| cluster\_status | X | +| remove\_node | X | +| configure\_cluster | X | + +| Components | Restricted to Super\_Users | +| -------------------- | :------------------------: | +| get\_components | X | +| get\_component\_file | X | +| set\_component\_file | X | +| drop\_component | X | +| add\_component | X | +| package\_component | X | +| deploy\_component | X | + +| Custom Functions | Restricted to Super\_Users | +| ---------------------------------- | :------------------------: | +| custom\_functions\_status | X | +| get\_custom\_functions | X | +| get\_custom\_function | X | +| set\_custom\_function | X | +| drop\_custom\_function | X | +| add\_custom\_function\_project | X | +| drop\_custom\_function\_project | X | +| package\_custom\_function\_project | X | +| deploy\_custom\_function\_project | X | + +| Registration | Restricted to Super\_Users | +| ------------------ | :------------------------: | +| registration\_info | | +| get\_fingerprint | X | +| set\_license | X | + +| Jobs | Restricted to Super\_Users | +| ----------------------------- | :------------------------: | +| get\_job | | +| search\_jobs\_by\_start\_date | X | + +| Logs | Restricted to Super\_Users | +| --------------------------------- | :------------------------: | +| read\_log | X | +| read\_transaction\_log | X | +| delete\_transaction\_logs\_before | X | +| read\_audit\_log | X | +| delete\_audit\_logs\_before | X | + +| Utilities | Restricted to Super\_Users | +| ----------------------- | :------------------------: | +| delete\_records\_before | X | +| export\_local | X | +| export\_to\_s3 | X | +| system\_information | X | +| restart | X | +| restart\_service | X | +| get\_configuration | X | +| configure\_cluster | X | + +| Token Authentication | Restricted to Super\_Users | +| ------------------------------ | :------------------------: | +| create\_authentication\_tokens | | +| refresh\_operation\_token | | + +## Error: Must execute 
as User
+
+**You may have gotten an error like** `Error: Must execute as <>`.
+
+This means that you installed HarperDB as `<>`. Because HarperDB stores files natively on the operating system, we only allow the HarperDB executable to be run by a single user. This prevents permissions issues on files.
+
+For example, if you installed as user\_a but later wanted to run as user\_b, user\_b may not have access to the hdb files HarperDB needs. This also keeps HarperDB more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/date-functions.md b/site/versioned_docs/version-4.2/developers/sql-guide/date-functions.md
new file mode 100644
index 00000000..f19d2126
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/date-functions.md
@@ -0,0 +1,222 @@
+---
+title: SQL Date Functions
+---
+
+# SQL Date Functions
+
+HarperDB utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC.
+
+When parsing date values passed to SQL date functions in HDB, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found.
+
+### CURRENT_DATE()
+
+Returns the current date in UTC in `YYYY-MM-DD` String format.
+
+```
+"SELECT CURRENT_DATE() AS current_date_result" returns
+	{
+		"current_date_result": "2020-04-22"
+	}
+```
+
+### CURRENT_TIME()
+
+Returns the current time in UTC in `HH:mm:ss.SSS` String format.
+
+```
+"SELECT CURRENT_TIME() AS current_time_result" returns
+	{
+		"current_time_result": "15:18:14.639"
+	}
+```
+
+### CURRENT_TIMESTAMP
+
+Referencing this variable will evaluate as the current Unix Timestamp in milliseconds.
+
+```
+"SELECT CURRENT_TIMESTAMP AS current_timestamp_result" returns
+	{
+		"current_timestamp_result": 1587568845765
+	}
+```
+
+### DATE([date_string])
+
+Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above.
+
+```
+"SELECT DATE(1587568845765) AS date_result" returns
+	{
+		"date_result": "2020-04-22T15:20:45.765+0000"
+	}
+```
+
+```
+"SELECT DATE(CURRENT_TIMESTAMP) AS date_result2" returns
+	{
+		"date_result2": "2020-04-22T15:20:45.765+0000"
+	}
+```
+
+### DATE_ADD(date, value, interval)
+
+Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+|--------------|-----------|
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND
+"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return
+	{
+		"date_add_result": 1587655245765
+	}
+```
+
+```
+"SELECT DATE_ADD(CURRENT_TIMESTAMP, 2, 'years')
+AS date_add_result2" returns
+	{
+		"date_add_result2": 1650643129017
+	}
+```
+
+### DATE_DIFF(date_1, date_2[, interval])
+
+Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds.
+
+Accepted interval values:
+* years
+* months
+* weeks
+* days
+* hours
+* minutes
+* seconds
+
+```
+"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours')
+AS date_diff_result" returns
+	{
+		"date_diff_result": -17519.753333333334
+	}
+```
+
+### DATE_FORMAT(date, format)
+
+Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/).
+
+```
+"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss')
+AS date_format_result" returns
+	{
+		"date_format_result": "2018-04-22 15:57:07"
+	}
+```
+
+### DATE_SUB(date, value, interval)
+
+Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+|--------------|-----------|
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns
+	{
+		"date_sub_result": 1524410445765
+	}
+```
+
+### EXTRACT(date, date_part)
+
+Extracts and returns the date_part requested as a String value. The accepted date_part values below show the value returned for date = “2020-03-26T15:13:02.041+0000”
+
+| date_part | Example return value* |
+|--------------|------------------------|
+| year | “2020” |
+| month | “3” |
+| day | “26” |
+| hour | “15” |
+| minute | “13” |
+| second | “2” |
+| millisecond | “41” |
+
+```
+"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns
+	{
+		"extract_result": "2020"
+	}
+```
+
+### GETDATE()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT GETDATE() AS getdate_result" returns
+	{
+		"getdate_result": 1587568845765
+	}
+```
+
+### GET_SERVER_TIME()
+
+Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+```
+"SELECT GET_SERVER_TIME() AS get_server_time_result" returns
+	{
+		"get_server_time_result": "2020-04-22T15:20:45.765+0000"
+	}
+```
+
+### OFFSET_UTC(date, offset)
+
+Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours.
+
+```
+"SELECT OFFSET_UTC(1587568845765, 240) AS offset_utc_result" returns
+	{
+		"offset_utc_result": "2020-04-22T19:20:45.765+0400"
+	}
+```
+
+```
+"SELECT OFFSET_UTC(1587568845765, 10) AS offset_utc_result2" returns
+	{
+		"offset_utc_result2": "2020-04-23T01:20:45.765+1000"
+	}
+```
+
+### NOW()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT NOW() AS now_result" returns
+	{
+		"now_result": 1587568845765
+	}
+```
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/features-matrix.md b/site/versioned_docs/version-4.2/developers/sql-guide/features-matrix.md
new file mode 100644
index 00000000..f0ee3072
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/features-matrix.md
@@ -0,0 +1,83 @@
+---
+title: SQL Features Matrix
+---
+
+# SQL Features Matrix
+
+HarperDB provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need. If not, feel free to [add a Feature Request](https://feedback.harperdb.io/).
+
+| INSERT | |
+|------------------------------------|-----|
+| Values - multiple values supported | ✔ |
+| Sub-SELECT | ✗ |
+
+| UPDATE | |
+|-----------------|-----|
+| SET | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
+
+| DELETE | |
+|------------|-----|
+| FROM | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+
+| SELECT | |
+|-----------------------|-----|
+| Column SELECT | ✔ |
+| Aliases | ✔ |
+| Aggregator Functions | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
+| Constant Values | ✔ |
+| Distinct | ✔ |
+| Sub-SELECT | ✗ |
+
+| FROM | |
+|-------------------|-----|
+| Multi-table JOIN | ✔ |
+| INNER JOIN | ✔ |
+| LEFT OUTER JOIN | ✔ |
+| LEFT INNER JOIN | ✔ |
+| RIGHT OUTER JOIN | ✔ |
+| RIGHT INNER JOIN | ✔ |
+| FULL JOIN | ✔ |
+| UNION | ✗ |
+| Sub-SELECT | ✗ |
+| TOP | ✔ |
+
+| WHERE | |
+|----------------------------|-----|
+| Multi-Conditions | ✔ |
+| Wildcards | ✔ |
+| IN | ✔ |
+| LIKE | ✔ |
+| Bit-wise Operators AND, OR | ✔ |
+| Bit-wise Operators NOT | ✔ |
+| NULL | ✔ |
+| BETWEEN | ✔ |
+| EXISTS,ANY,ALL | ✔ |
+| Compare columns | ✔ |
+| Compare constants | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
+| Sub-SELECT | ✗ |
+
+| GROUP BY | |
+|-----------------------|-----|
+| Multi-Column GROUP BY | ✔ |
+
+| HAVING | |
+|--------------------------------|-----|
+| Aggregate function conditions | ✔ |
+
+| ORDER BY | |
+|-----------------------|-----|
+| Multi-Column ORDER BY | ✔ |
+| Aliases | ✔ |
+| Date Functions* | ✔ |
+| Math Functions | ✔ |
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/functions.md b/site/versioned_docs/version-4.2/developers/sql-guide/functions.md
new file mode 100644
index 00000000..ccd6f247
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/functions.md
@@ -0,0 +1,153 @@
+---
+title: HarperDB SQL Functions
+---
+
+# HarperDB SQL Functions
+
+This SQL keywords reference contains the SQL functions available in HarperDB.
+
+## Functions
+
+### Aggregate
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
+| COUNT | SELECT COUNT(_column_name_) FROM _schema.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. |
+| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string of the group’s non-null values, concatenated and comma-separated. Returns null when there are no non-null values. |
+| MAX | SELECT MAX(_column_name_) FROM _schema.table_ WHERE _condition_ | Returns the largest value in a specified column. |
+| MIN | SELECT MIN(_column_name_) FROM _schema.table_ WHERE _condition_ | Returns the smallest value in a specified column. |
+| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
+| ARRAY* | ARRAY(_expression_) | Returns a list of data as a field. |
+| DISTINCT_ARRAY* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |
+
+*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).
+
+### Conversion
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
+| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |
+
+### Date & Time
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
+| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” string format. |
+| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, see [SQL Date Functions](./date-functions). |
+| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ string format. If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_SUB | DATE_SUB(_date, value, interval_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). |
+| DAY | DAY(_date_) | Returns the day of the month for the given date. |
+| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
+| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. Accepted date_part values include year, month, day, hour, minute, second, and millisecond. For more information, see [SQL Date Functions](./date-functions). |
+| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
+| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
+| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. |
+| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
+| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. |
+| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. |
+| MONTH | MONTH(_date_) | Returns the month part for a specified date in range of 1 to 12. |
+| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. |
+| YEAR | YEAR(_date_) | Returns the year part for a specified date. |
+
+### Logical
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
+| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2; if not equal, returns expression_1. |
+
+### Mathematical
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
+| CEIL | CEIL(_number_) | Returns the integer ceiling, the smallest integer value that is bigger than or equal to a given number. |
+| EXP | EXP(_number_) | Returns e to the power of a specified number. |
+| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
+| RANDOM | RANDOM(_seed_) | Returns a pseudo-random number. |
+| ROUND | ROUND(_number, decimal_places_) | Rounds a given number to a specified number of decimal places. |
+| SQRT | SQRT(_expression_) | Returns the square root of an expression. |
+
+### String
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
+| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
+| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
+| LEN | LEN(_string_) | Returns the length of a string. |
+| LOWER | LOWER(_string_) | Converts a string to lower-case. |
+| REGEXP | SELECT _column_name_ FROM _schema.table_ WHERE _column_name_ REGEXP _pattern_ | Searches a column for strings matching a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REGEXP_LIKE | SELECT _column_name_ FROM _schema.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches a column for strings matching a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string, with new_string. |
+| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified amount of characters from a string. |
+| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
+| UPPER | UPPER(_string_) | Converts a string to upper-case. |
+
+## Operators
+
+### Logical Operators
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| BETWEEN | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | Returns values (numbers, text, or dates) within a given range, inclusive. |
+| IN | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
+| LIKE | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |
+
+## Queries
+
+### General
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _schema.table_ | Returns only unique values, eliminating duplicate records. |
+| FROM | FROM _schema.table_ | Used to list the schema(s), table(s), and any joins required for a SQL statement. |
+| GROUP BY | SELECT _column_name(s)_ FROM _schema.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
+| HAVING | SELECT _column_name(s)_ FROM _schema.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
+| SELECT | SELECT _column_name(s)_ FROM _schema.table_ | Selects data from a table. |
+| WHERE | SELECT _column_name(s)_ FROM _schema.table_ WHERE _condition_ | Extracts records based on a defined condition. |
+
+### Joins
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| CROSS JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ CROSS JOIN _schema.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
+| FULL OUTER | SELECT _column_name(s)_ FROM _schema.table_1_ FULL OUTER JOIN _schema.table_2_ ON _table_1.column_name_ _= table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
+| [INNER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ INNER JOIN _schema.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Returns only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
+| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ LEFT OUTER JOIN _schema.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Returns all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
+| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _schema.table_1_ RIGHT OUTER JOIN _schema.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |
+
+### Predicates
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| IS NOT NULL | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
+| IS NULL | SELECT _column_name(s)_ FROM _schema.table_ WHERE _column_name_ IS NULL | Tests for null values. |
+
+### Statements
+
+| Keyword | Syntax | Description |
+|---------|--------|-------------|
+| DELETE | DELETE FROM _schema.table_ WHERE _condition_ | Deletes existing data from a table. |
+| INSERT | INSERT INTO _schema.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. |
+| UPDATE | UPDATE _schema.table_ SET _column_1 = value_1, column_2 = value_2, ....,_ WHERE _condition_ | Alters existing records in a table. |
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/index.md b/site/versioned_docs/version-4.2/developers/sql-guide/index.md
new file mode 100644
index 00000000..ae274bd3
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/index.md
@@ -0,0 +1,88 @@
+---
+title: SQL Guide
+---
+
+# SQL Guide
+
+:::warning
+HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation and for use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+## HarperDB SQL Guide
+
+The purpose of this guide is to describe the available functionality of HarperDB as it relates to supported SQL functionality. The SQL parser is still actively being developed, and many SQL features may not yet be optimized or utilize indexes. This document will be updated as more features and functionality become available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying and for utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](./features-matrix)**.**
+
+HarperDB adheres to the concept of databases & tables. This allows developers to isolate table structures from each other, all within one database.
+
+## Select
+
+HarperDB has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering.
+
+All results are returned as JSON object arrays.
+
+Query for all records and attributes in the dev.dog table:
+
+```
+SELECT * FROM dev.dog
+```
+
+Query specific columns from all rows in the dev.dog table:
+
+```
+SELECT id, dog_name, age FROM dev.dog
+```
+
+Query for all records and attributes in the dev.dog table ORDERED BY age in ASC order:
+
+```
+SELECT * FROM dev.dog ORDER BY age
+```
+
+_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._
+
+## Insert
+
+HarperDB supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. HarperDB does not support selecting from one table to insert into another at this time.
+
+```
+INSERT INTO dev.dog (id, dog_name, age, breed_id)
+    VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
+```
+
+## Update
+
+HarperDB supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time, selecting from one table to update another is not supported.
+
+```
+UPDATE dev.dog
+    SET owner_name = 'Kyle'
+    WHERE id IN (1, 2)
+```
+
+## Delete
+
+HarperDB supports deleting records from a table with condition support.
+
+```
+DELETE FROM dev.dog
+    WHERE age < 4
+```
+
+## Joins
+
+HarperDB allows developers to join any number of tables and currently supports the following join types:
+
+* INNER JOIN
+* LEFT INNER JOIN
+* LEFT OUTER JOIN
+
+Here’s a basic example joining two tables from our Get Started example, joining a dogs table with a breeds table:
+
+```
+SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
+    FROM dev.dog AS d
+    INNER JOIN dev.breed AS b ON d.breed_id = b.id
+    WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
+    AND b.section = 'Mutt'
+    ORDER BY d.dog_name
+```
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/json-search.md b/site/versioned_docs/version-4.2/developers/sql-guide/json-search.md
new file mode 100644
index 00000000..7d160413
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/json-search.md
@@ -0,0 +1,173 @@
+---
+title: SQL JSON Search
+---
+
+# SQL JSON Search
+
+HarperDB automatically indexes all top level attributes in a row / object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, HarperDB offers a special SQL function called SEARCH\_JSON. The SEARCH\_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) in our SQL engine.
+
+## Syntax
+
+SEARCH\_JSON(_expression, attribute_)
+
+Executes the supplied string _expression_ against the data of the defined top level _attribute_ for each row. The expression both filters and defines output from the JSON document.
+
+### Example 1
+
+#### Search a string array
+
+Here are two records in the database:
+
+```json
+[
+	{
+		"id": 1,
+		"name": ["Harper", "Penny"]
+	},
+	{
+		"id": 2,
+		"name": ["Penny"]
+	}
+]
+```
+
+Here is a simple query that gets any record with "Harper" found in the name.
+
+```
+SELECT *
+FROM dev.dog
+WHERE search_json('"Harper" in *', name)
+```
+
+### Example 2
+
+The purpose of this query is to give us every movie where at least two of our favorite actors from Marvel films have acted together. The results will return the movie title, the overview, the release date, and an object array of each actor’s name and their character name in the movie.
+
+Both function calls evaluate the credits.cast attribute; this attribute is an object array of every cast member in a movie.
+
+```
+SELECT m.title,
+       m.overview,
+       m.release_date,
+       SEARCH_JSON($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]].{"actor": name, "character": character}, c.`cast`) AS characters
+FROM movies.credits c
+    INNER JOIN movies.movie m
+        ON c.movie_id = m.id
+WHERE SEARCH_JSON($count($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2
+```
+
+A sample of this data from the movie The Avengers looks like:
+
+```json
+[
+	{
+		"cast_id": 46,
+		"character": "Tony Stark / Iron Man",
+		"credit_id": "52fe4495c3a368484e02b251",
+		"gender": "male",
+		"id": 3223,
+		"name": "Robert Downey Jr.",
+		"order": 0
+	},
+	{
+		"cast_id": 2,
+		"character": "Steve Rogers / Captain America",
+		"credit_id": "52fe4495c3a368484e02b19b",
+		"gender": "male",
+		"id": 16828,
+		"name": "Chris Evans",
+		"order": 1
+	},
+	{
+		"cast_id": 307,
+		"character": "Bruce Banner / The Hulk",
+		"credit_id": "5e85e8083344c60015411cfa",
+		"gender": "male",
+		"id": 103,
+		"name": "Mark Ruffalo",
+		"order": 2
+	}
+]
+```
+
+Let’s break down the SEARCH\_JSON function call in the SELECT:
+
+```
+SEARCH_JSON(
+	$[name in [
+		"Robert Downey Jr.",
+		"Chris Evans",
+		"Scarlett Johansson",
+		"Mark Ruffalo",
+		"Chris Hemsworth",
+		"Jeremy Renner",
+		"Clark Gregg",
+		"Samuel L. Jackson",
+		"Gwyneth Paltrow",
+		"Don Cheadle"
+	]].{
+		"actor": name,
+		"character": character
+	},
+	c.`cast`
+)
+```
+
+The first argument passed to SEARCH\_JSON is the expression to execute against the second argument, which is the cast attribute on the credits table. This expression will execute for every row. Looking at the expression, it starts with “$\[…]”; this tells the expression to iterate all elements of the cast array.
+
+Then the expression tells the function to only return entries where the name attribute matches any of the actors defined in the array:
+
+```
+name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]
+```
+
+So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{"actor": name, "character": character}`. This tells the function to create a specific object for each matching entry.
+
+**Sample Result**
+
+```json
+[
+	{
+		"actor": "Robert Downey Jr.",
+		"character": "Tony Stark / Iron Man"
+	},
+	{
+		"actor": "Chris Evans",
+		"character": "Steve Rogers / Captain America"
+	},
+	{
+		"actor": "Mark Ruffalo",
+		"character": "Bruce Banner / The Hulk"
+	}
+]
+```
+
+Just having the SEARCH\_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors. In order to filter out the movies we do not want, we also use SEARCH\_JSON in the WHERE clause.
+
+This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT:
+
+```
+SEARCH_JSON(
+	$count(
+		$[name in [
+			"Robert Downey Jr.",
+			"Chris Evans",
+			"Scarlett Johansson",
+			"Mark Ruffalo",
+			"Chris Hemsworth",
+			"Jeremy Renner",
+			"Clark Gregg",
+			"Samuel L. Jackson",
+			"Gwyneth Paltrow",
+			"Don Cheadle"
+		]]
+	),
+	c.`cast`
+) >= 2
+```
+
+As seen above, we execute the same name filter against the cast array; the primary difference is that we wrap the filtered results in $count(…). This returns a count of the matching cast members, which we then compare against >= 2 in SQL.
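+
+SEARCH\_JSON can aggregate and transform as well as filter. As one more minimal sketch against the dev.dog records from Example 1 (the `$count($)` expression and the `name_count` alias are illustrative, not part of the original examples), this query counts how many names each dog has and keeps only dogs with more than one:
+
+```
+SELECT id, SEARCH_JSON('$count($)', name) AS name_count
+FROM dev.dog
+WHERE SEARCH_JSON('$count($)', name) > 1
+```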
+
+To see further SEARCH\_JSON examples in action, view our Postman Collection, which provides a [sample schema & data with query examples](../operations-api/advanced-json-sql-examples).
+
+To learn more about how to build expressions, check out the JSONata documentation: [http://docs.jsonata.org/overview](http://docs.jsonata.org/overview)
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/reserved-word.md b/site/versioned_docs/version-4.2/developers/sql-guide/reserved-word.md
new file mode 100644
index 00000000..bcefa00a
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/reserved-word.md
@@ -0,0 +1,203 @@
+---
+title: HarperDB SQL Reserved Words
+---
+
+# HarperDB SQL Reserved Words
+
+This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a schema, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable.
+
+For example, for a table called ASSERT in the dev schema, a SQL select on that table would look like:
+
+```
+SELECT * from dev.`ASSERT`
+```
+
+Alternatively:
+
+```
+SELECT * from dev.[ASSERT]
+```
+
+### RESERVED WORD LIST
+
+* ABSOLUTE
+* ACTION
+* ADD
+* AGGR
+* ALL
+* ALTER
+* AND
+* ANTI
+* ANY
+* APPLY
+* ARRAY
+* AS
+* ASSERT
+* ASC
+* ATTACH
+* AUTOINCREMENT
+* AUTO_INCREMENT
+* AVG
+* BEGIN
+* BETWEEN
+* BREAK
+* BY
+* CALL
+* CASE
+* CAST
+* CHECK
+* CLASS
+* CLOSE
+* COLLATE
+* COLUMN
+* COLUMNS
+* COMMIT
+* CONSTRAINT
+* CONTENT
+* CONTINUE
+* CONVERT
+* CORRESPONDING
+* COUNT
+* CREATE
+* CROSS
+* CUBE
+* CURRENT_TIMESTAMP
+* CURSOR
+* DATABASE
+* DECLARE
+* DEFAULT
+* DELETE
+* DELETED
+* DESC
+* DETACH
+* DISTINCT
+* DOUBLEPRECISION
+* DROP
+* ECHO
+* EDGE
+* END
+* ENUM
+* ELSE
+* EXCEPT
+* EXISTS
+* EXPLAIN
+* FALSE
+* FETCH
+* FIRST
+* FOREIGN
+* FROM
+* GO
+* GRAPH
+* GROUP
+* GROUPING
+* HAVING
+* HDB_HASH
+* HELP
+* IF
+* IDENTITY
+* IS
+* IN
+* INDEX
+* INNER
+* INSERT
+* INSERTED
+* INTERSECT
+* INTO
+* JOIN
+* KEY
+* LAST
+* LET
+* LEFT
+* LIKE
+* LIMIT
+* LOOP
+* MATCHED
+* MATRIX
+* MAX
+* MERGE
+* MIN
+* MINUS
+* MODIFY
+* NATURAL
+* NEXT
+* NEW
+* NOCASE
+* NO
+* NOT
+* NULL
+* OFF
+* ON
+* ONLY
+* OFFSET
+* OPEN
+* OPTION
+* OR
+* ORDER
+* OUTER
+* OVER
+* PATH
+* PARTITION
+* PERCENT
+* PLAN
+* PRIMARY
+* PRINT
+* PRIOR
+* QUERY
+* READ
+* RECORDSET
+* REDUCE
+* REFERENCES
+* RELATIVE
+* REPLACE
+* REMOVE
+* RENAME
+* REQUIRE
+* RESTORE
+* RETURN
+* RETURNS
+* RIGHT
+* ROLLBACK
+* ROLLUP
+* ROW
+* SCHEMA
+* SCHEMAS
+* SEARCH
+* SELECT
+* SEMI
+* SET
+* SETS
+* SHOW
+* SOME
+* SOURCE
+* STRATEGY
+* STORE
+* SYSTEM
+* SUM
+* TABLE
+* TABLES
+* TARGET
+* TEMP
+* TEMPORARY
+* TEXTSTRING
+* THEN
+* TIMEOUT
+* TO
+* TOP
+* TRAN
+* TRANSACTION
+* TRIGGER
+* TRUE
+* TRUNCATE
+* UNION
+* UNIQUE
+* UPDATE
+* USE
+* USING
+* VALUE
+* VERTEX
+* VIEW
+* WHEN
+* WHERE
+* WHILE
+* WITH
+* WORK
diff --git a/site/versioned_docs/version-4.2/developers/sql-guide/sql-geospatial-functions.md b/site/versioned_docs/version-4.2/developers/sql-guide/sql-geospatial-functions.md
new file mode 100644
index 00000000..e557b5be
--- /dev/null
+++ b/site/versioned_docs/version-4.2/developers/sql-guide/sql-geospatial-functions.md
@@ -0,0 +1,380 @@
+---
+title: SQL Geospatial Functions
+---
+
+# SQL Geospatial Functions
+
+HarperDB geospatial features require data to be stored in a single column using the [GeoJSON standard](http://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements.
+
+If you are new to GeoJSON, you should check out the full specification here: http://geojson.org/. There are a few important things to point out before getting started.
+
+1) All GeoJSON coordinates are stored in `[longitude, latitude]` format.
+2) Coordinates or GeoJSON geometries must be passed as strings when written directly in a SQL statement.
+3) Note: if you are using Postman for your testing, due to limitations in the Postman client you will need to escape quotes in your strings, and your SQL will need to be passed on a single line.
+
+In the examples that follow, schema and table names may change, but all GeoJSON data will be stored in a column named geo_data.
+
+# geoArea
+
+The geoArea() function returns the area of one or more features in square meters.
+
+## Syntax
+
+geoArea(_geoJSON_)
+
+## Parameters
+
+| Parameter | Description |
+|-----------|---------------------------------|
+| geoJSON | Required. One or more features. |
+
+### Example 1
+
+Calculate the area, in square meters, of a manually passed GeoJSON polygon.
+
+```
+SELECT geoArea('{
+	"type":"Feature",
+	"geometry":{
+		"type":"Polygon",
+		"coordinates":[[
+			[0,0],
+			[0.123456,0],
+			[0.123456,0.123456],
+			[0,0.123456]
+		]]
+	}
+}')
+```
+
+### Example 2
+
+Find all records that have an area less than 1 square mile (or 2589988 square meters).
+
+```
+SELECT * FROM dev.locations
+WHERE geoArea(geo_data) < 2589988
+```
+
+# geoLength
+
+Takes a GeoJSON and measures its length in the specified units (default is kilometers).
+
+## Syntax
+
+geoLength(_geoJSON_[_, units_])
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| geoJSON | Required. GeoJSON to measure. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+
+Calculate the length, in kilometers, of a manually passed GeoJSON linestring.
+
+```
+SELECT geoLength('{
+	"type": "Feature",
+	"geometry": {
+		"type": "LineString",
+		"coordinates": [
+			[-104.97963309288025,39.76163265441438],
+			[-104.9823260307312,39.76365323407955],
+			[-104.99193906784058,39.75616442110704]
+		]
+	}
+}')
+```
+
+### Example 2
+
+Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of length, smallest to largest.
+
+```
+SELECT *, geoLength(geo_data, 'miles') as length
+FROM dev.locations
+WHERE geoLength(geo_data, 'miles') < 5
+ORDER BY length ASC
+```
+
+# geoDifference
+
+Returns a new polygon with the difference of the second polygon clipped from the first polygon.
+
+## Syntax
+
+geoDifference(_polygon1, polygon2_)
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| polygon2 | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1. |
+
+### Example
+
+Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_).
+
+```
+SELECT geoDifference('{
+		"type": "Feature",
+		"properties": {
+			"name":"Colorado"
+		},
+		"geometry": {
+			"type": "Polygon",
+			"coordinates": [[
+				[-109.072265625,37.00255267215955],
+				[-102.01904296874999,37.00255267215955],
+				[-102.01904296874999,41.0130657870063],
+				[-109.072265625,41.0130657870063],
+				[-109.072265625,37.00255267215955]
+			]]
+		}
+	}',
+	'{
+		"type": "Feature",
+		"properties": {
+			"name":"City Park"
+		},
+		"geometry": {
+			"type": "Polygon",
+			"coordinates": [[
+				[-104.95973110198975,39.7543828214657],
+				[-104.95955944061278,39.744781185675386],
+				[-104.95904445648193,39.74422022399989],
+				[-104.95835781097412,39.74402223643582],
+				[-104.94097709655762,39.74392324244047],
+				[-104.9408483505249,39.75434982844515],
+				[-104.95973110198975,39.7543828214657]
+			]]
+		}
+	}'
+)
+```
+
+# geoDistance
+
+Calculates the distance between two points in units (default is kilometers).
+
+## Syntax
+
+geoDistance(_point1, point2_[_, units_])
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+
+Calculate the distance, in miles, between HarperDB’s headquarters and the Washington Monument.
+
+```
+SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles')
+```
+
+### Example 2
+
+Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in ascending order.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoDistance('[-104.979127,39.761563]', geo_data, 'kilometers') < 40
+ORDER BY distance ASC
+```
+
+# geoNear
+
+Determines if point1 and point2 are within a specified distance from each other; default units are kilometers. Returns a Boolean.
+
+## Syntax
+
+geoNear(_point1, point2, distance_[_, units_])
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| distance | Required. The maximum distance in units as an integer or decimal. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+
+Return all locations within 50 miles of a given point.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles')
+```
+
+### Example 2
+
+Return all locations within 2 degrees of the earth of a given point. (Each degree lat/long is about 69 miles [111 kilometers].) Return all data and the distance in miles, sorted by ascending distance.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 2, 'degrees')
+ORDER BY distance ASC
+```
+
+# geoContains
+
+Determines if geo2 is completely contained by geo1. Returns a Boolean.
+
+## Syntax
+
+geoContains(_geo1, geo2_)
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. |
+
+### Example 1
+
+Return all locations within the state of Colorado (passed as a GeoJSON string).
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains('{
+	"type": "Feature",
+	"properties": {
+		"name":"Colorado"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-109.072265625,37.00255267],
+			[-102.01904296874999,37.00255267],
+			[-102.01904296874999,41.01306579],
+			[-109.072265625,41.01306579],
+			[-109.072265625,37.00255267]
+		]]
+	}
+}', geo_data)
+```
+
+### Example 2
+
+Return all locations which contain HarperDB Headquarters.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains(geo_data, '{
+	"type": "Feature",
+	"properties": {
+		"name": "HarperDB Headquarters"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.98060941696167,39.760704817357905],
+			[-104.98053967952728,39.76065120861263],
+			[-104.98055577278137,39.760642961109674],
+			[-104.98037070035934,39.76049450588716],
+			[-104.9802714586258,39.76056254790385],
+			[-104.9805235862732,39.76076461167841],
+			[-104.98060941696167,39.760704817357905]
+		]]
+	}
+}')
+```
+
+# geoEqual
+
+Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information, see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean.
+
+## Syntax
+
+geoEqual(_geo1_, _geo2_)
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+
+Find HarperDB Headquarters among all locations in the database.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoEqual(geo_data, '{
+	"type": "Feature",
+	"properties": {
+		"name": "HarperDB Headquarters"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.98060941696167,39.760704817357905],
+			[-104.98053967952728,39.76065120861263],
+			[-104.98055577278137,39.760642961109674],
+			[-104.98037070035934,39.76049450588716],
+			[-104.9802714586258,39.76056254790385],
+			[-104.9805235862732,39.76076461167841],
+			[-104.98060941696167,39.760704817357905]
+		]]
+	}
+}')
+```
+
+# geoCrosses
+
+Determines if the geometries cross over each other. Returns a Boolean.
+
+## Syntax
+
+geoCrosses(_geo1, geo2_)
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+
+Find all locations that cross over a highway.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoCrosses(
+	geo_data,
+	'{
+		"type": "Feature",
+		"properties": {
+			"name": "Highway I-25"
+		},
+		"geometry": {
+			"type": "LineString",
+			"coordinates": [
+				[-104.9139404296875,41.00477542222947],
+				[-105.0238037109375,39.715638134796336],
+				[-104.853515625,39.53370327008705],
+				[-104.853515625,38.81403111409755],
+				[-104.61181640625,38.39764411353178],
+				[-104.8974609375,37.68382032669382],
+				[-104.501953125,37.00255267215955]
+			]
+		}
+	}'
+)
+```
+
+# geoConvert
+
+Converts a series of coordinates into a GeoJSON of the specified type.
+
+## Syntax
+
+geoConvert(_coordinates, geo_type_[, _properties_])
+
+## Parameters
+
+| Parameter | Description |
+|-----------|-------------|
+| coordinates | Required. One or more coordinates. |
+| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’. |
+| properties | Optional. Escaped JSON object with properties to be added to the GeoJSON output. |
+
+### Example
+
+Convert a given coordinate into a GeoJSON point with specified properties.
+
+```
+SELECT geoConvert(
+	'[-104.979127,39.761563]',
+	'point',
+	'{
+		"name": "HarperDB Headquarters"
+	}'
+)
+```
diff --git a/site/versioned_docs/version-4.2/getting-started.md b/site/versioned_docs/version-4.2/getting-started.md
new file mode 100644
index 00000000..3f2a5e53
--- /dev/null
+++ b/site/versioned_docs/version-4.2/getting-started.md
@@ -0,0 +1,84 @@
+---
+title: Getting Started
+---
+
+# Getting Started
+
+HarperDB is designed for quick and simple setup and deployment, with smart defaults that lead to fast, scalable, and globally distributed database applications.
+
+You can easily create a HarperDB database in the cloud through our Studio or install it locally. The quickest way to get HarperDB up and running is with [HarperDB Cloud](./deployments/harperdb-cloud/), our database-as-a-service offering. However, HarperDB is a [database application platform](./developers/applications/), and to leverage HarperDB’s full application development capabilities (defining schemas, endpoints, messaging, and gateways), you may wish to install and run HarperDB locally so that you can use your standard local IDE tools, debugging, and version control.
+
+### Installing a HarperDB Instance
+
+You can simply install HarperDB with npm (or yarn, or other package managers):
+
+```shell
+npm install -g harperdb
+```
+
+Here we installed HarperDB globally (and we recommend this) to make it easy to run a single HarperDB instance with multiple projects, but you can install it locally (not globally) as well.
+
+You can then start HarperDB by running:
+
+```shell
+harperdb
+```
+
+You can now use HarperDB as a standalone database. Alternatively, you can create a cloud instance (see below), which is also an easy way to get started.
+
+#### Developing Database Applications with HarperDB
+
+HarperDB is more than just a database: with HarperDB you build "database applications" that package your schema, endpoints, and application logic together. You can then deploy your application to an entire cluster of HarperDB instances, ready to scale to edge delivery of data and application endpoints directly to your users. To get started with HarperDB, take a look at our application development guide, with quick and easy examples:
+
+[Database application development guide](./developers/applications/)
+
+### Setting up a Cloud Instance
+
+To set up a HarperDB cloud instance, simply sign up and create a new instance:
+
+1. [Sign up for the HarperDB Studio](https://studio.harperdb.io/sign-up)
+1. [Create a new HarperDB Cloud instance](./administration/harperdb-studio/instances#create-a-new-instance)
+
+Note that a local instance and a cloud instance are not mutually exclusive. You can register your local instance in the HarperDB Studio, and a common development flow is to develop locally and then deploy your application to your cloud instance.
+
+HarperDB Cloud instance provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready.
+
+#### Using the HarperDB Studio
+
+Now that you have a HarperDB instance, if you want to use HarperDB as a standalone database, you can fully administer and interact with your database through the Studio. This section links to appropriate articles to get you started interacting with your data.
+
+1. [Create a schema](./administration/harperdb-studio/manage-schemas-browse-data#create-a-schema)
+1. [Create a table](./administration/harperdb-studio/manage-schemas-browse-data#create-a-table)
+1. [Add a record](./administration/harperdb-studio/manage-schemas-browse-data#add-a-record)
+1. [Load CSV data](./administration/harperdb-studio/manage-schemas-browse-data#load-csv-data) (Here’s a sample CSV of the HarperDB team’s dogs)
+1. [Query data via SQL](./administration/harperdb-studio/query-instance-data)
+
+## Administering HarperDB
+
+If you are deploying and administering HarperDB, you may want to look at our [configuration documentation](./deployments/configuration) and our administrative operations API below.
+
+### HarperDB APIs
+
+The preferred way to interact with HarperDB for typical querying, accessing, and updating data (CRUD) operations is through the REST interface, described in the [REST documentation](./developers/rest).
+
+The Operations API provides extensive administrative capabilities for HarperDB, and the [Operations API documentation has usage and examples](./developers/operations-api/). Generally, it is recommended that you use the RESTful interface as your primary interface for performant data access, querying, and manipulation (DML) when building production applications (under heavy load), and the operations API (and SQL) for data definition (DDL) and administrative purposes.
+
+The HarperDB Operations API is a single endpoint, which means the only thing that needs to change across different calls is the body. For example purposes, a basic cURL command is shown below to create a schema called `dev`. To perform a different operation, swap out the operation in the `data-raw` body parameter.
+
+```
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+    "operation": "create_schema",
+    "schema": "dev"
+}'
+```
+
+## Support and Learning More
+
+If you find yourself in need of additional support, you can submit a [HarperDB support ticket](https://harperdbhelp.zendesk.com/hc/en-us/requests/new). You can also learn more about available HarperDB projects by searching [GitHub](https://github.com/search?q=harperdb).
+
+### Video Tutorials
+
+[HarperDB video tutorials are available on our YouTube channel](https://www.youtube.com/@harperdbio). HarperDB and the HarperDB Studio are constantly changing; as such, there may be small discrepancies in UI/UX.
diff --git a/site/versioned_docs/version-4.2/index.md b/site/versioned_docs/version-4.2/index.md
new file mode 100644
index 00000000..fd7be9a8
--- /dev/null
+++ b/site/versioned_docs/version-4.2/index.md
@@ -0,0 +1,106 @@
+---
+title: HarperDB Docs
+---
+
+# HarperDB Docs
+
+HarperDB is a globally-distributed edge application platform. It reduces complexity, increases performance, and lowers costs by combining user-defined applications, a high-performance database, and an enterprise-grade streaming broker into a single package.
The platform offers unlimited horizontal scale at the click of a button, and syncs data across the cluster in milliseconds. HarperDB simplifies the process of delivering applications and the data that drives them to the edge, which dramatically improves both the user experience and total cost of ownership for large-scale applications. Deploying HarperDB on global infrastructure enables a CDN-like solution for enterprise data and applications. + +HarperDB's documentation covers installation, getting started, administrative operation APIs, security, and much more. Browse the topics at left, or choose one of the commonly used documentation sections below. + +:::info +Wondering what's new with HarperDB 4.2? Take a look at our latest [Release Notes](./technical-details/release-notes/v4-tucker/4.2.0). +::: + +## Getting Started + +
+* **Getting Started Guide**: Get up and running with HarperDB
+* **Quick Install HarperDB**: Run HarperDB on your own hardware
+* **Try HarperDB Cloud**: Spin up an instance in minutes to get going fast
+
+## Building with HarperDB
+
+* **HarperDB Applications**: Build a fully featured HarperDB Component with custom functionality
+* **REST Queries**: The recommended HTTP interface for data access, querying, and manipulation
+* **Operations API**: Configure, deploy, administer, and control your HarperDB instance
+* **Clustering & Replication**: The process of connecting multiple HarperDB databases together to create a database mesh network that enables users to define data replication patterns
+* **Explore the HarperDB Studio**: The web-based GUI for HarperDB. Studio enables you to administer, navigate, and monitor all of your HarperDB instances in a simple, user-friendly interface
diff --git a/site/versioned_docs/version-4.2/technical-details/_category_.json b/site/versioned_docs/version-4.2/technical-details/_category_.json
new file mode 100644
index 00000000..69ce80a6
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Technical Details",
+  "position": 4,
+  "link": {
+    "type": "generated-index",
+    "title": "Technical Details Documentation",
+    "description": "Reference documentation and technical specifications",
+    "keywords": [
+      "technical-details"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/analytics.md b/site/versioned_docs/version-4.2/technical-details/reference/analytics.md
new file mode 100644
index 00000000..7b475176
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/analytics.md
@@ -0,0 +1,117 @@
+---
+title: Analytics
+---
+
+# Analytics
+
+HarperDB provides extensive telemetry and analytics data to help monitor the status of the server and its workloads, to help understand traffic and usage patterns, to identify issues and scaling needs, and to identify the queries and actions that consume the most resources.
+
+HarperDB collects statistics for all operations, URL endpoints, and messaging topics, aggregating information by thread, operation, resource, and method, in real time. These statistics are logged in the `hdb_raw_analytics` and `hdb_analytics` tables in the `system` database.
+
+There are two "levels" of analytics in the HarperDB analytics tables: the first is the immediate level of raw, direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search\_by\_conditions operation (this example searches 10 seconds' worth of analytics) on the `hdb_raw_analytics` table:
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+    "operation": "search_by_conditions",
+    "schema": "system",
+    "table": "hdb_raw_analytics",
+    "conditions": [{
+        "search_attribute": "id",
+        "search_type": "between",
+        "search_value": [1688594000000, 1688594010000]
+    }]
+}
+```
+
+And a typical response looks like:
+
+```
+{
+    "time": 1688594390708,
+    "period": 1000.8336279988289,
+    "metrics": [
+        {
+            "metric": "bytes-sent",
+            "path": "search_by_conditions",
+            "type": "operation",
+            "median": 202,
+            "mean": 202,
+            "p95": 202,
+            "p90": 202,
+            "count": 1
+        },
+        ...
+        {
+            "metric": "memory",
+            "threadId": 2,
+            "rss": 1492664320,
+            "heapTotal": 124596224,
+            "heapUsed": 119563120,
+            "external": 3469790,
+            "arrayBuffers": 798721
+        },
+        {
+            "metric": "utilization",
+            "idle": 138227.52767700003,
+            "active": 70.5066209952347,
+            "utilization": 0.0005098165086230495
+        }
+    ],
+    "threadId": 2,
+    "totalBytesProcessed": 12182820,
+    "id": 1688594390708.6853
+}
+```
+
+The second level of analytics recording is aggregate data. The aggregate records are recorded once a minute, combining the results from all the per-second entries from all the threads into a summary of statistics. The ids for these records are also timestamps in milliseconds since epoch, and the records can be queried from the `hdb_analytics` table.
You can query these with an operation like:
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+    "operation": "search_by_conditions",
+    "schema": "system",
+    "table": "hdb_analytics",
+    "conditions": [{
+        "search_attribute": "id",
+        "search_type": "between",
+        "search_value": [1688194100000, 1688594990000]
+    }]
+}
+```
+
+And a summary record looks like:
+
+```
+{
+    "period": 60000,
+    "metric": "bytes-sent",
+    "method": "connack",
+    "type": "mqtt",
+    "median": 4,
+    "mean": 4,
+    "p95": 4,
+    "p90": 4,
+    "count": 1,
+    "id": 1688589569646,
+    "time": 1688589569646
+}
+```
+
+The following general resource usage statistics are tracked:
+
+* memory - This includes RSS, heap, buffer, and external data usage.
+* utilization - How much of the time the worker was processing requests.
+* mqtt-connections - The number of MQTT connections.
+
+The following types of information are tracked for each HTTP request:
+
+* success - How many requests returned a successful response (2xx response code).
+* TTFB - Time to first byte in the response to the client.
+* transfer - Time to finish the transfer of the data to the client.
+* bytes-sent - How many bytes of data were sent to the client.
+
+Requests are categorized by operation name for the operations API, by resource name for the REST API, and by command for the MQTT interface.
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/architecture.md b/site/versioned_docs/version-4.2/technical-details/reference/architecture.md
new file mode 100644
index 00000000..f2881d3c
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/architecture.md
@@ -0,0 +1,42 @@
+---
+title: Architecture
+---
+
+# Architecture
+
+HarperDB's architecture consists of resources, which include tables and user-defined data sources and extensions, and server interfaces, which include the RESTful HTTP interface, the operations API, and MQTT. Servers are supported by routing and auth services.
+
+```
+ ┌──────────┐         ┌──────────┐
+ │ Clients  │         │ Clients  │
+ └────┬─────┘         └────┬─────┘
+      │                    │
+      ▼                    ▼
+ ┌────────────────────────────────────────┐
+ │                                        │
+ │       Socket routing/management        │
+ ├───────────────────────┬────────────────┤
+ │                       │                │
+ │  Server Interfaces  ─►│ Authentication │
+ │  RESTful HTTP, MQTT   │ Authorization  │
+ │                     ◄─┤                │
+ │   ▲                   └────────────────┤
+ │   │          │                         │
+ ├───┼──────────┼─────────────────────────┤
+ │   │          │            ▲            │
+ │   ▼  Resources  ▲     │ ┌───────────┐  │
+ │                 │     └─┤           │  │
+ ├─────────────────┴────┐  │    App    │  │
+ │                      ├─►│ resources │  │
+ │   Database tables    │  └───────────┘  │
+ │                      │        ▲        │
+ ├──────────────────────┘        │        │
+ │   ▲          ▼                │        │
+ │       ┌────────────────┐      │        │
+ │       │    External    │      │        │
+ │       │  data sources  ├──────┘        │
+ │       │                │               │
+ │       └────────────────┘               │
+ │                                        │
+ └────────────────────────────────────────┘
+```
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/content-types.md b/site/versioned_docs/version-4.2/technical-details/reference/content-types.md
new file mode 100644
index 00000000..6aee4850
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/content-types.md
@@ -0,0 +1,27 @@
+---
+title: Content Types
+---
+
+# Content Types
+
+HarperDB supports several different content types (or MIME types) for both HTTP request bodies (describing operations) as well as for serializing content into HTTP response bodies. HarperDB follows HTTP standards for specifying both request body content types and acceptable response body content types.
Any of these content types can be used with any of the standard HarperDB operations.
+
+For request body content, the content type should be specified with the `Content-Type` header. For example, with JSON, use `Content-Type: application/json`, and for CBOR, include `Content-Type: application/cbor`. To request that the response body be encoded with a specific content type, use the `Accept` header. If you want the response to be in JSON, use `Accept: application/json`. If you want the response to be in CBOR, use `Accept: application/cbor`.
+
+The following content types are supported:
+
+## JSON - application/json
+
+JSON is the most widely used content type, and is relatively readable and easy to work with. However, JSON does not support all the data types that are supported by HarperDB, and can't be used to natively encode data types like binary data or explicit Maps/Sets. Also, JSON is not as efficient as binary formats. When using JSON, compression is recommended (this also follows standard HTTP protocol with the `Accept-Encoding` header) to improve network transfer performance (although there is server performance overhead). JSON is a good choice for web development when standard JSON types are sufficient, particularly when combined with compression and when debuggability/observability is important.
+
+## CBOR - application/cbor
+
+CBOR is a highly efficient binary format, and is a recommended format for most production use cases with HarperDB. CBOR supports the full range of HarperDB data types, including binary data, typed dates, and explicit Maps/Sets. CBOR is very performant and space efficient even without compression. Compression will still yield better network transfer size/performance, but compressed CBOR is generally not any smaller than compressed JSON. CBOR also natively supports streaming for optimal performance (using indefinite length arrays). The CBOR format is well standardized, and HarperDB's CBOR support provides an excellent balance of performance and size efficiency.
+
+## MessagePack - application/x-msgpack
+
+MessagePack is another efficient binary format like CBOR, with support for all HarperDB data types. MessagePack generally has wider adoption than CBOR and can be useful in systems that don't have CBOR support (or good support for it). However, MessagePack does not have native support for streaming of arrays of data (for query results), and so query results are returned as a (concatenated) sequence of MessagePack objects/maps. MessagePack decoders used with HarperDB must be prepared to decode a direct sequence of MessagePack values to properly read responses.
+
+## Comma-separated Values (CSV) - text/csv
+
+Comma-separated values is an easy format to use and understand that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure and support for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available.
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/data-types.md b/site/versioned_docs/version-4.2/technical-details/reference/data-types.md
new file mode 100644
index 00000000..fca44b40
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/data-types.md
@@ -0,0 +1,45 @@
+---
+title: Data Types
+---
+
+# Data Types
+
+HarperDB supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs.
Using JSON for communication naturally limits the data types to those available in JSON (HarperDB supports all JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. As of v4.1, HarperDB supports MessagePack and CBOR, which allow for all of HarperDB's supported data types. This includes:
+
+(Note that these labels are descriptive; they do not necessarily correspond to the GraphQL schema type names, but the schema type names are noted where possible.)
+
+## Boolean
+
+true or false. The GraphQL schema type name is `Boolean`.
+
+## String
+
+Strings, or text, are a sequence of any unicode characters and are internally encoded with UTF-8. The GraphQL schema type name is `String`.
+
+## Number
+
+Numbers can be stored as signed integers up to 64-bit or floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. JSON is parsed by JS, so the maximum safe (precise) integer is 9007199254740991 (larger numbers can be stored, but aren’t guaranteed integer precision). Custom Functions may use BigInt numbers to store/access larger 64-bit integers, but integers beyond 64-bit can’t be stored with integer precision (they will be stored as standard double-precision numbers). The GraphQL schema type name is `Float` (`Int` can also be used to describe numbers that should fit into signed 32-bit integers).
+
+## Object/Map
+
+Objects, or maps, that hold a set of named properties can be stored in HarperDB. When provided as JSON objects or JavaScript objects, all property keys are stored as strings. The order of properties is also preserved in HarperDB’s storage. Duplicate property keys are not allowed (they are dropped in parsing any incoming data).
+
+## Array
+
+Arrays hold an ordered sequence of values and can be stored in HarperDB. There is no support for sparse arrays, although you can use objects to store data with numbers (converted to strings) as properties.
+
+## Null
+
+A null value can be stored in HarperDB property values as well.
+
+## Date
+
+Dates can be stored as a specific data type. This is not supported in JSON, but is supported by MessagePack and CBOR. Custom Functions can also store and use Dates using JavaScript Date instances. The GraphQL schema type name is `Date`.
+
+## Binary Data
+
+Binary data can be stored in property values as well. JSON doesn’t have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in HarperDB. Custom Functions can also store binary data by using NodeJS’s Buffer or Uint8Array instances to hold the binary data. The GraphQL schema type name is `Bytes`.
+
+## Explicit Map/Set
+
+Explicit instances of JavaScript Maps and Sets can be stored and preserved in HarperDB as well. This can’t be represented with JSON, but can be with CBOR.
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/dynamic-schema.md b/site/versioned_docs/version-4.2/technical-details/reference/dynamic-schema.md
new file mode 100644
index 00000000..c10aaf8e
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/dynamic-schema.md
@@ -0,0 +1,148 @@
+---
+title: Dynamic Schema
+---
+
+# Dynamic Schema
+
+When tables are created without any schema, through the operations API (without specifying attributes) or the Studio, the tables follow "dynamic schema" behavior.
Generally, it is best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity and precise control over indexing, without dependency on the data itself. However, it can often be simpler and quicker to create a table and let the data auto-generate the schema dynamically, with everything auto-indexed for broad querying.
+
+With dynamic schemas, individual attributes are reflexively created as data is ingested, meaning the table will adapt to the structure of the data ingested. HarperDB tracks the metadata around schemas, tables, and attributes, allowing for `describe_table`, `describe_schema`, and `describe_all` operations.
+
+### Databases
+
+HarperDB databases hold a collection of transactionally connected tables together in a single file. This means that operations across tables within a database can be performed in a single atomic transaction. By default, tables are added to the default database called "data", but other databases can be created and specified for tables.
+
+### Tables
+
+HarperDB tables group records together with a common data pattern. To create a table, users must provide a table name and a primary key.
+
+* **Table Name**: Used to identify the table.
+* **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in the HarperDB operations API.
+
+## Primary Key
+
+The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary key; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [HarperDB Storage Algorithm](./storage-algorithm) utilizes this value for indexing.
+
+**Standard Attributes**
+
+With tables that are using dynamic schemas, additional attributes are reflexively added via insert and update operations (in both SQL and NoSQL) when new attributes are included in the data structure provided to HarperDB. As a result, schemas are additive, meaning new attributes are created in the underlying storage algorithm as additional data structures are provided. HarperDB offers `create_attribute` and `drop_attribute` operations for users who prefer to manually define their data model independent of data ingestion. When new attributes are added to tables with existing data, the value of that new attribute will be assumed `null` for all existing records.
+
+**Audit Attributes**
+
+HarperDB automatically creates two audit attributes used on each record if the table is created without a schema.
+
+* `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+* `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+
+### Dynamic Schema Example
+
+To better understand the behavior, let’s take a look at an example. This example utilizes [HarperDB API operations](../../developers/operations-api/databases-and-tables).
+
+**Create a Database**
+
+```json
+{
+    "operation": "create_database",
+    "database": "dev"
+}
+```
+
+**Create a Table**
+
+Notice the database name, table name, and primary key name are the only required parameters.
+
+```json
+{
+    "operation": "create_table",
+    "database": "dev",
+    "table": "dog",
+    "primary_key": "id"
+}
+```
+
+At this point the table does not have structure beyond what we provided, so the table looks like this:
+
+**dev.dog**
+
+![](/img/v4.2/reference/dynamic\_schema\_2\_create\_table.png.webp)
+
+**Insert Record**
+
+To define attributes, we do not need to do anything beyond sending them in with an insert operation.
+
+```json
+{
+    "operation": "insert",
+    "database": "dev",
+    "table": "dog",
+    "records": [
+        {"id": 1, "dog_name": "Penny", "owner_name": "Kyle"}
+    ]
+}
+```
+
+With a single record inserted and new attributes defined, our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.2/reference/dynamic\_schema\_3\_insert\_record.png.webp)
+
+Indexes have been automatically created for the `dog_name` and `owner_name` attributes.
+
+**Insert Additional Record**
+
+If we continue inserting records with the same data schema, no schema updates are required. One record will omit the hash attribute from the insert to demonstrate GUID generation.
+
+```json
+{
+    "operation": "insert",
+    "database": "dev",
+    "table": "dog",
+    "records": [
+        {"id": 2, "dog_name": "Monk", "owner_name": "Aron"},
+        {"dog_name": "Harper", "owner_name": "Stephen"}
+    ]
+}
+```
+
+In this case, there is no change to the schema. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.2/reference/dynamic\_schema\_4\_insert\_additional\_record.png.webp)
+
+**Update Existing Record**
+
+In this case, we will update a record with a new attribute not previously defined on the table.
+
+```json
+{
+    "operation": "update",
+    "database": "dev",
+    "table": "dog",
+    "records": [
+        {"id": 2, "weight_lbs": 35}
+    ]
+}
+```
+
+Now we have a new attribute called `weight_lbs`. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.2/reference/dynamic\_schema\_5\_update\_existing\_record.png.webp)
+
+**Query Table with SQL**
+
+Now if we query for all records where `weight_lbs` is `null`, we expect to get back two records.
+
+```json
+{
+    "operation": "sql",
+    "sql": "SELECT * FROM dev.dog WHERE weight_lbs IS NULL"
+}
+```
+
+This results in the expected two records being returned.
+
+![](/img/v4.2/reference/dynamic\_schema\_6\_query\_table\_with\_sql.png.webp)
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/globals.md b/site/versioned_docs/version-4.2/technical-details/reference/globals.md
new file mode 100644
index 00000000..68623f59
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/globals.md
@@ -0,0 +1,80 @@
+---
+title: Globals
+---
+
+# Globals
+
+The primary way that JavaScript code can interact with HarperDB is through global variables, which include several objects and classes that provide access to the tables, server hooks, and resources that HarperDB provides for building applications. As global variables, these can be directly accessed in any module.
+
+These global variables are also available through the `harperdb` module/package, which can provide better typing in TypeScript. To use this within your own project directory, make sure you link the package to your current `harperdb` installation:
+
+```bash
+npm link harperdb
+```
Once linked, if you are using EcmaScript module syntax you can import function from `harperdb` like: + +```javascript +import { tables, Resource } from 'harperdb'; +``` + +Or if you are using CommonJS format for your modules: + +```javascript +const { tables, Resource } = require('harperdb'); +``` + +The global variables include: + +### `tables` + +This is an object that holds all the tables for the default database (called `data`) as properties. Each of these property values is a table class that subclasses the Resource interface and provides access to the table through the Resource interface. For example, you can get a record from a table (in the default database) called 'my-table' with: + +```javascript +import { tables } from 'harperdb'; +const { MyTable } = tables; +async function getRecord() { + let record = await MyTable.get(recordId); +} +``` + +It is recommended that you [define a schema](../../getting-started/) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake\_case for the actual table names, and converted back to CamelCase when added to the `tables` object. + +### `databases` + +This is an object that holds all the databases in HarperDB, and can be used to explicitly access a table by database name. Each database will be a property on this object, each of these property values will be an object with the set of all tables in that database. The default database, `databases.data` should equal the `tables` export. For example, if you want to access the "dog" table in the "dev" database, you could do so: + +```javascript +import { databases } from 'harperdb'; +const { Dog } = databases.dev; +``` + +### `Resource` + +This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](./resource) for more details about implementing a Resource class. + +### `auth(username, password?): Promise` + +This returns the user object with permissions/authorization information based on the provided username. If a password is provided, the password will be verified before returning the user object (if the password is incorrect, an error will be thrown). + +### `logger` + +This provides methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/standard-logging) for more information. + +### `server` + +This provides a number of functions and objects to interact with the server including: + +#### `server.config` + +This provides access to the HarperDB configuration object. This comes from the [harperdb-config.yaml](../../deployments/configuration) (parsed into object form). + +#### `server.recordAnalytics(value, metric, path?, method?, type?)` + +This records the provided value as a metric into HarperDB's analytics. HarperDB efficiently records and tracks these metrics and makes them available through [analytics API](./analytics). The values are aggregated and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics. For the parameters, make sure you are not grouping on too fine of a level for useful aggregation. 
The parameters are:
+
+* `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example.
+* `metric` - This is the name of the metric.
+* `path` - This is an optional path (like a URL path). For a URL like /my-resource/, you would typically include a path of "my-resource", not including the id, so you can group all the requests to "my-resource" instead of aggregating by each individual id.
+* `method` - Optional method to group by.
+* `type` - Optional type to group by.
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/headers.md b/site/versioned_docs/version-4.2/technical-details/reference/headers.md
new file mode 100644
index 00000000..c58bb7ec
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/headers.md
@@ -0,0 +1,12 @@
+---
+title: HarperDB Headers
+---
+
+# HarperDB Headers
+
+All HarperDB API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all HarperDB API responses:
+
+| Key | Example Value | Description |
+|-------------------|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| server-timing | db;dur=7.165 | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools. |
+| content-type | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header. |
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/index.md b/site/versioned_docs/version-4.2/technical-details/reference/index.md
new file mode 100644
index 00000000..e9a6ebf9
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/index.md
@@ -0,0 +1,16 @@
+---
+title: Reference
+---
+
+# Reference
+
+This section contains technical details and reference materials for HarperDB.
+
+* [Resource API](./resource)
+* [Transactions](./transactions)
+* [Storage Algorithm](./storage-algorithm)
+* [Dynamic Schema](./dynamic-schema)
+* [Headers](./headers)
+* [Limitations](./limits)
+* [Content Types](./content-types)
+* [Data Types](./data-types)
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/limits.md b/site/versioned_docs/version-4.2/technical-details/reference/limits.md
new file mode 100644
index 00000000..ccad9d64
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/limits.md
@@ -0,0 +1,33 @@
+---
+title: HarperDB Limits
+---
+
+# HarperDB Limits
+
+This document outlines the limitations of HarperDB.
+
+## Database Naming Restrictions
+
+**Case Sensitivity**
+
+HarperDB database metadata (database names, table names, and attribute/column names) are case sensitive, meaning databases, tables, and attributes can differ only by the case of their characters.
+
+**Restrictions on Database Metadata Names**
+
+HarperDB database metadata (database names, table names, and attribute names) cannot contain the following UTF-8 characters:
+
+```
+/`¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
+```
+
+Additionally, they cannot contain the first 31 non-printing characters. Spaces are allowed, but not recommended as best practice.
The regular expression used to verify a name is valid is:
+
+```
+^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$
+```
+
+## Table Limitations
+
+**Attribute Maximum**
+
+HarperDB limits the number of total indexed attributes across tables (including the primary key of each table) to 10,000 per database.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/resource.md b/site/versioned_docs/version-4.2/technical-details/reference/resource.md
new file mode 100644
index 00000000..708e8457
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/resource.md
@@ -0,0 +1,531 @@
+---
+title: Resource Class
+---
+
+# Resource Class
+
+## Resource Class
+
+The Resource class is designed to model different data resources within HarperDB. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users.
+
+Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, and you can interact with them through various methods or queries. Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. Therefore, distinct resource instances are created for every record or query that is accessed, and the instance methods are used for interaction with the data.
+
+The RESTful HTTP server and other server interfaces will instantiate/load resources to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, they will be executed in a transaction, and the access checks will be performed before the method is executed.
+
+Paths (URLs, MQTT topics) are mapped to different resource instances. A path that specifies an ID, like `/MyResource/3492`, will be mapped to a Resource instance where the instance's ID will be `3492`, and interactions will use the instance methods like `get()`, `put()`, and `post()`. Using the root path (`/MyResource/`) will map to a Resource instance with an ID of `null`.
+
+You can create classes that extend Resource to define your own data sources, typically to interface with external data sources (the Resource base class is available as a global variable in the HarperDB JS environment). In doing this, you will generally be extending and providing implementations for the instance methods below. For example:
+
+```javascript
+export class MyExternalData extends Resource {
+	get() {
+		// fetch data from an external source, using our primary key
+		return this.fetch(this.id);
+	}
+	put(data) {
+		// send the data into the external source
+	}
+	delete() {
+		// delete an entity in the external data source
+	}
+	subscribe(options) {
+		// if the external data source is capable of real-time notification of changes, can subscribe
+	}
+}
+// we can export this class from resources.js as our own endpoint, or use this as the source for
+// a HarperDB table to store and cache the data coming from this data source:
+tables.MyCache.sourcedFrom(MyExternalData);
+```
+
+You can also extend table classes in the same way, overriding the instance methods for custom functionality.
The `tables` object is a global variable in the HarperDB JavaScript environment, along with `Resource`:
+
+```javascript
+export class MyTable extends tables.MyTable {
+	get() {
+		// we can add properties or change properties before returning data:
+		this.newProperty = 'newValue';
+		this.existingProperty = 44;
+		return super.get(); // returns the record, modified with the changes above
+	}
+	put(data) {
+		// can change data any way we want
+		super.put(data);
+	}
+	delete() {
+		super.delete();
+	}
+	post(data) {
+		// providing a post handler (for HTTP POST requests) is a common way to create additional
+		// actions that aren't well described with just PUT or DELETE
+	}
+}
+```
+Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class twice.
+
+## Global Variables
+
+### `tables`
+
+This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API.
+
+### `databases`
+
+This is an object with all the databases that have been defined in HarperDB (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.
+
+### `Resource`
+
+This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables.
+
+### `server`
+
+This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../../developers/components/writing-extensions).
+
+### `transaction`
+
+This provides a function for starting transactions. See the transactions section below for more information.
+
+### `contentTypes`
+
+This provides an interface for defining new content type handlers. See the [content type extensions documentation](../../developers/components/writing-extensions) for more information.
+
+### TypeScript Support
+
+While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them. This can be done by setting up a package link to the main HarperDB package in your app:
+
+```
+# you may need to go to your harperdb directory and set it up as a link first
+npm link harperdb
+```
+
+And then you can import any of the main HarperDB APIs you will use, and your IDE should understand the full typings associated with them:
+
+```
+import { databases, tables, Resource } from 'harperdb';
+```
+
+## Resource Class (Instance) Methods
+
+### Properties/attributes declared in schema
+
+Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances.
+
+### `get(queryOrProperty?)`: Resource|AsyncIterable
+
+This is called to return the record or data for this resource, and is called by HTTP GET requests.
This may be optionally called with a `query` object to specify that a query should be performed, or with a string to indicate that the specified property value should be returned. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. The default `get` method (`super.get()`) returns the current record as a plain object.
+
+The query object can be used to access any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
+
+```javascript
+get(query) {
+	// note that query will only exist (as an object) if there is a query string
+	let param1 = query?.get?.('param1'); // returns 'value'
+	let id = this.getId(); // returns 'some-id'
+	...
+}
+```
+If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return `this` instance of the resource. If `get` is called on a collection (`/Table/?name=value`), the default action is to `search` and return an AsyncIterable of results.
+
+### `search(query: Query)`: AsyncIterable
+
+By default, this is called by `get(query)` from a collection resource.
+
+### `getId(): string|number|Array`
+
+Returns the primary key value for this resource.
+
+### `put(data: object)`
+
+This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance.
+
+### `patch(data: object)`
+
+This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance.
+
+### `update(data: object, fullUpdate: boolean?)`
+
+This is called by the default `put` and `patch` handlers to update a record. `put` calls it with `fullUpdate` set to `true` to indicate a full record replacement (`patch` calls it with the second argument as `false`). Any additional property changes that are made before the transaction commits will also be persisted.
+
+### `delete(queryOrProperty?)`
+
+This will delete this record or resource, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete()`) deletes the record from the table as part of the current transaction.
+
+### `publish(message)`
+
+This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic.
+
+### `post(data)`
+
+This is called for HTTP POST requests.
You can define this method to provide your own implementation of how POST requests should be handled. Generally, this provides a generic mechanism for various types of data updates.
+
+### `invalidate()`
+
+This method is available on tables. This will invalidate the current record in the table. This can be used with a caching table to indicate that the source data has changed and the record needs to be reloaded when next accessed.
+
+### `subscribe(subscriptionRequest): Promise`
+
+This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource.
+
+The returned promise resolves to a Subscription object, which is an `AsyncIterable` that you can iterate through with a `for await` loop. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time, previous count, or there is a message for the current or "retained" record, these may be immediately returned).
+
+The `subscriptionRequest` object supports the following properties (all optional):
+
+* `id` - The primary key of the record (or topic) that you want to subscribe to. If omitted, this will be a subscription to the whole table.
+* `isCollection` - If this is enabled and the `id` was included, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id: 'sub', isCollection: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`).
+* `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
+* `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`.
+* `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.
+
+### `connect(incomingMessages?: AsyncIterable): AsyncIterable`
+
+This is called when a connection is received through WebSockets or Server-Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client.
+
+### `set(property, value)`
+
+This will assign the provided value to the designated property in the resource's record. During a write operation, this will indicate that the record has changed and the changes will be saved during commit. During a read operation, this will modify the copy of the record that will be serialized (converted to the output format of JSON, MessagePack, etc.).
+
+### `allowCreate(user)`
+
+This is called to determine if the user has permission to create the current resource.
This is called as part of external incoming requests (HTTP). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's insert permission for the table.
+
+### `allowRead(user)`
+
+This is called to determine if the user has permission to read from the current resource. This is called as part of external incoming requests (HTTP GET). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's read permission for the table.
+
+### `allowUpdate(user)`
+
+This is called to determine if the user has permission to update the current resource. This is called as part of external incoming requests (HTTP PUT). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's update permission for the table.
+
+### `allowDelete(user)`
+
+This is called to determine if the user has permission to delete the current resource. This is called as part of external incoming requests (HTTP DELETE). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's delete permission for the table.
+
+### `getUpdatedTime(): number`
+
+This returns the last updated time of the resource (the timestamp of the last commit). This is returned as milliseconds from epoch.
+
+### `wasLoadedFromSource(): boolean`
+
+Indicates if the record had been loaded from the source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or waited on an in-flight request from the source to finish).
+
+### `getContext(): Context`
+
+Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action.
+
+#### `Context`
+
+The `Context` object has the following (potential) properties:
+
+* `user` - This is the user object, which includes information about the username, role, and authorizations.
+* `transaction` - The current transaction.
+
+If the current method was triggered by an HTTP request, the following properties are available:
+
+* `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated.
+
+When a resource gets a request through HTTP, the request object is the context, which has the following properties:
+
+* `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string).
+* `method` - The method of the HTTP request.
+* `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
+* `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
+* `pathname` - This provides the path part of the URL (no querystring).
+* `host` - This provides the host name of the request (from the `Host` header).
+* `ip` - This provides the IP address of the client that made the request.
+
+When a resource is accessed as a data source:
+
+* `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource.
+
+### `operation(operationObject: Object, authorize?: boolean): Promise`
+
+This method is available on tables and will execute a HarperDB operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](https://api.harperdb.io/) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized).
+
+### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean`
+
+For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value.
+
+## Resource Static Methods and Properties
+
+The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way of interacting with tables outside of methods that are directly extending a table.
+
+The get, put, delete, subscribe, and connect methods all have static equivalents. There is also a `static search()` method for specifically handling searching a table with query parameters. By default, the Resource static methods default to calling the instance methods. Again, static methods are generally the preferred way to interact with resources and call them from application code. These methods are available on all user Resource classes and tables.
+
+### `get(id: Id, context?: Resource|Context)`
+
+This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post, you could do:
+
+```javascript
+const { Comment } = tables;
+...
+// in class:
+	async get() {
+		for (let commentId of this.commentIds) {
+			let comment = await Comment.get(commentId, this);
+			// now you can do something with the comment record
+		}
+	}
+```
+
+Type definition for `Id`:
+```
+Id = string|number|array
+```
+
+### `put(record: object, context?: Resource|Context): Promise`
+### `put(id: Id, record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will fully replace the existing record. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
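+
+For example, a minimal sketch of creating or replacing a record with the static `put` (the `Dog` table and its attributes are borrowed from the dynamic schema example, so adjust the names to your own schema):
+
+```javascript
+const { Dog } = tables;
+
+// fully replaces (or creates) the record with primary key 1
+await Dog.put({ id: 1, dog_name: 'Penny', owner_name: 'Kyle' });
+
+// equivalent call using the explicit id argument form
+await Dog.put(1, { dog_name: 'Penny', owner_name: 'Kyle' });
+```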
+
+### `patch(recordUpdate: object, context?: Resource|Context): Promise`
+### `patch(id: Id, recordUpdate: object, context?: Resource|Context): Promise`
+
+This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `delete(id: Id, context?: Resource|Context): Promise`
+
+Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `publish(message: object, context?: Resource|Context): Promise`
+### `publish(topic: Id, message: object, context?: Resource|Context): Promise`
+
+Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `subscribe(subscriptionRequest, context?: Resource|Context): Promise`
+
+Subscribes to a record/resource.
+
+### `search(query: Query, context?: Resource|Context): AsyncIterable`
+
+This will perform a query on this table or collection. The query parameter can be used to specify the desired query.
+
+### `primaryKey`
+
+This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example:
+
+```
+let record34 = await Table.get(34);
+record34[Table.primaryKey] -> 34
+```
+
+There are additional methods that are only available on table classes (which are a type of resource).
+
+### `Table.sourcedFrom(Resource, options)`
+
+This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource, and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The options parameter can include an `expiration` property that will configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries.
+
+If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data).
+
+### `parsePath(path, context, query)`
+
+This is called by the static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will convert a multi-segment path to a multipart id (an array), which facilitates hierarchical id-based data access, and also parses `.property` suffixes for accessing properties and specifying the preferred content type in the URL. However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simpler path-to-id preservation:

```javascript
+	static parsePath(path) {
+		return path; // return the path as the id
+	}
+```
+
+### `isCollection(resource: Resource): boolean`
+This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity.
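+
+Tying the caching pieces together, here is a minimal sketch of wiring a table to the `MyExternalData` source from the earlier example (the one-hour `expiration` value is illustrative, and assumes the option is expressed in seconds):
+
+```javascript
+// use MyExternalData as the origin for the MyCache table;
+// cached entries expire, and are refreshed from the source, after about an hour
+tables.MyCache.sourcedFrom(MyExternalData, { expiration: 3600 });
+```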
+
+### Context and Transactions
+
+Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation.
+
+This also allows timestamps that are accessed during resolution to be used to determine the overall last-updated timestamp, which informs the header timestamps (which facilitates accurate client-side caching). The context also maintains user, session, and request metadata information that is communicated so that contextual request information (like headers) can be accessed and any writes are properly attributed to the correct user.
+
+When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the source argument (second argument) to the static methods.
+
+For example, suppose we had a method to post a comment on a blog post, and when this happens we want to add the comment to a separate comment table and also update an array of comment IDs on the blog record. We might do this:
+
+```javascript
+const { Comment } = tables;
+
+export class BlogPost extends tables.BlogPost {
+	async post(comment) {
+		// add a comment record to the comment table, using this resource as the source for the context
+		await Comment.put(comment, this);
+		this.comments.push(comment.id); // add the id for the record to our array of comment ids
+		// Both of these actions will be committed atomically as part of the same transaction
+	}
+}
+```
+
+Please see the [transaction documentation](./transactions) for more information on how transactions work in HarperDB.
+
+### Query
+
+The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional:
+
+* `conditions`: This is an array of objects that specify the conditions used to match records (if `conditions` is omitted or is an empty array, the search returns everything in the table). Each condition object has the following properties:
+  * `attribute`: Name of the property/attribute to match on.
+  * `value`: The value to match.
+  * `comparator`: This can specify how the value is compared. This defaults to `"equals"`, but can also be `"greater_than"`, `"greater_than_equal"`, `"less_than"`, `"less_than_equal"`, `"starts_with"`, `"contains"`, `"ends_with"`, `"between"`, and `"not_equal"`.
+* `operator`: Specifies if the conditions should be applied as an `"and"` (records must match all conditions) or as an `"or"` (records must match at least one condition). This defaults to `"and"`.
+* `limit`: This specifies the limit of the number of records that should be returned from the query.
+* `offset`: This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records.
+* `select`: This specifies the specific properties that should be included in each record that is returned. This can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. This can be an array, to specify a set of properties that should be included in the returned objects. The array can also specify an `asArray = true` property (`select.asArray = true`), in which case the query results will return a set of arrays of the values of the specified properties instead of objects; this can be used to return more compact results.
+
+The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index).
+
+For example, we could do a query like:
+
+```javascript
+let { Product } = tables;
+let results = Product.search({
+	conditions: [
+		{ attribute: 'rating', value: 4.5, comparator: 'greater_than' },
+		{ attribute: 'price', value: 100, comparator: 'less_than' },
+	],
+	offset: 20,
+	limit: 10,
+	select: ['id', 'name', 'price', 'rating'],
+});
+for await (let record of results) {
+	// iterate through each record in the query results
+}
+```
+`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will result in a long-lived read transaction, which can degrade performance (including write performance), and the transaction may eventually be aborted.
+
+### Interacting with the Resource Data Model
+
+When extending or interacting with table resources, when a resource instance is retrieved and instantiated, it will be loaded with the record data from its table. You can interact with this record through the resource instance. For any properties that have been defined in the table's schema, you can directly access or modify properties through standard property syntax. For example, let's say we defined a product schema:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+}
+```
+
+If we have extended this table class with our own `get()`, we can interact with any of these specified attributes/properties:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let name = this.name; // this is the name of the current product
+		let rating = this.rating; // this is the rating of the current product
+		this.rating = 3; // we can also modify the rating for the current instance
+		// (with a get this won't be saved by default, but will be used when serialized)
+		return super.get(query);
+	}
+}
+```
+
+Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:
+
+```javascript
+let product1 = await Product.get(1);
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+product1.rating = 3; // modify the rating for this instance (this will be saved without a call to update())
+```
+
+If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definitions for direct property access; instead, we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method:
+
+```javascript
+let product1 = await Product.get(1);
+let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+product1.set('newProperty', 'some value'); // we can assign any properties we want with set
+```
+
+And likewise, we can do this in an instance method, although you will probably want to use `super.get()`/`super.set()` so you don't have to write extra logic to avoid recursion:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+		super.set('newProperty', 'some value'); // we can assign any properties we want with set
+	}
+}
+```
+
+Note that you may also need to use `get`/`set` for properties that conflict with existing method names. For example, if your schema defines an attribute called `getId` (not recommended), you would need to access that property through `get('getId')` and `set('getId', value)`.
+
+If you want to save the changes you make, you can call the `update()` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.rating = 3;
+product1.set('newProperty', 'some value');
+product1.update(); // save both of these property changes
+```
+
+Updates are automatically saved inside modifying methods like put and post:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		this.name = data.name;
+		this.set('description', data.description);
+		// both of these changes will be saved automatically as this transaction commits
+	}
+}
+```
+
+We can also interact with properties in nested objects and arrays, following the same patterns. For example, we could define more complex types on our product:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+	brand: Brand
+	variations: [Variation]
+}
+type Brand {
+	name: String
+}
+type Variation {
+	name: String
+	price: Float
+}
+```
+
+We can interact with these nested properties:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		let brandName = this.brand.name;
+		let firstVariationPrice = this.variations[0].price;
+		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but can still try to access the property
+		// make some changes
+		this.variations.splice(0, 1); // remove first variation
+		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		this.brand.name = 'new brand name';
+		// all of these changes will be saved
+	}
+}
+```
+
+If you need to delete a property, you can do so with the `delete` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.delete('additionalInformation');
+product1.update();
+```
+
+You can also get a "plain" object representation of a resource instance by calling `toJSON()`, which will return a simple object with all the properties (whether or not they are defined in the schema) as direct normal properties:
+
+```javascript
+let product1 = await Product.get(1);
+let plainObject = product1.toJSON();
+for (let key in plainObject) {
+	// can iterate through the properties of this record
+}
+```
+
+### Throwing Errors
+
+You may throw errors (and leave them uncaught) from the response methods, and these should be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default, the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned. For example:
+```javascript
+if (notAuthorized()) {
+	let error = new Error('You are not authorized to access this');
+	error.statusCode = 403;
+	throw error;
+}
+```
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/storage-algorithm.md b/site/versioned_docs/version-4.2/technical-details/reference/storage-algorithm.md
new file mode 100644
index 00000000..024109a5
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/storage-algorithm.md
+---
+title: Storage Algorithm
+---
+
+# Storage Algorithm
+
+The HarperDB storage algorithm is fundamental to the HarperDB core functionality, enabling the [Dynamic Schema](./dynamic-schema) and all other user-facing functionality. HarperDB is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document provides additional details on how data is stored within HarperDB.
+
+## Query Language Agnostic
+
+The HarperDB storage algorithm was designed to abstract the data storage from any individual query language. HarperDB currently supports both SQL and NoSQL on top of this storage algorithm, with the ability to add additional query languages in the future. This means data can be inserted via NoSQL and read via SQL while hitting the same underlying data storage.
+
+## ACID Compliant
+
+Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, HarperDB offers ACID compliance independently on each node. Readers and writers operate independently of each other, meaning readers don’t block writers and writers don’t block readers. Each HarperDB table has a single writer process, avoiding deadlocks and ensuring that writes are executed in the order in which they were received. HarperDB tables can have multiple reader processes operating at the same time for consistent, high-scale reads.
+
+## Universally Indexed
+
+All top-level attributes are automatically indexed immediately upon ingestion. The [HarperDB Dynamic Schema](./dynamic-schema) reflexively creates both the attribute and its index as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
+
+## Additional LMDB Benefits
+
+HarperDB inherits both functional and performance benefits by implementing LMDB as the underlying key-value store. Data is memory-mapped, which enables quick data access without data duplication. All writers are fully serialized, making writes deadlock-free. LMDB is built to maximize operating system features and functionality, fully exploiting buffer cache and built to run in CPU cache. To learn more about LMDB, visit their documentation.
+
+## HarperDB Indexing Example (Single Table)
+
+![](/img/v4.2/reference/HarperDB-3.0-Storage-Algorithm.png.webp)
diff --git a/site/versioned_docs/version-4.2/technical-details/reference/transactions.md b/site/versioned_docs/version-4.2/technical-details/reference/transactions.md
new file mode 100644
index 00000000..8dbb70ca
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/reference/transactions.md
+---
+title: Transactions
+---
+
+# Transactions
+
+Transactions are an important part of robust handling of data in data-driven applications. HarperDB provides ACID-compliant support for transactions, allowing for guaranteed atomic, consistent, and isolated data handling within transactions, with durability guarantees on commit. Understanding how transactions are tracked and behave is important for properly leveraging transactional support in HarperDB. For most operations this is very intuitive: each HTTP request is executed in a transaction, so when multiple actions are executed in a single request, they are normally automatically included in the same transaction.
+
+Transactions span a database. Once a read snapshot is started, it is an atomic snapshot of all the tables in a database, and writes that span multiple tables in the database will all be committed atomically together (no writes in one table will be visible before writes in another table in the same database). If a transaction is used to access or write data in multiple databases, a separate database transaction will actually be used for each database, and there is no guarantee of atomicity between separate transactions in separate databases. This can be an important consideration when deciding if and how tables should be organized into different databases.
+
+Because HarperDB is designed to be a low-latency distributed database, locks are avoided in data handling. Because of this, transactions do not lock data within the transaction. When a transaction starts, it will provide a read snapshot of the database for any retrievals or queries, which means all reads will be performed on a single version of the database, isolated from any other writes that are concurrently taking place. Within a transaction, all writes are aggregated and atomically written on commit. These writes are all isolated (from other transactions) until committed, and all become visible atomically. However, because transactions are non-locking, it is possible that writes from other transactions may occur between when reads are performed and when the writes are committed (at which point the last write will win for any records that have been written concurrently). Support for locks in transactions is planned for a future release.
+
+Transactions can also be explicitly started using the `transaction` global function that is provided in the HarperDB environment:
+
+## `transaction(context?, callback: (transaction) => any): Promise`
+
+This executes the callback in a transaction, providing a context that can be used for any resource methods that are called. This returns a promise for when the transaction has been committed. The callback itself may be asynchronous (return a promise), allowing for asynchronous activity within the transaction. This is useful for starting a transaction when your code is not already running within a transaction (in an HTTP request handler, a transaction will typically already be started). For example, if we wanted to run an action on a timer that periodically loads data, we could ensure that the data is loaded in single transactions like this (note that HarperDB is multi-threaded, and if we run a timer-based job, we very likely want it to only run in one thread):
+
+```javascript
+import { tables } from 'harperdb';
+import { isMainThread } from 'worker_threads';
+const { MyTable } = tables;
+if (isMainThread) // only on main thread
+	setInterval(async () => {
+		let someData = await (await fetch(... some URL ...)).json();
+		transaction(async (txn) => {
+			for (let item of someData) {
+				await MyTable.put(item, txn);
+			}
+		});
+	}, 3600000); // every hour
+```
+
+You can provide your own context object for the transaction to attach to. If you call `transaction` with a context that already has a transaction started, it will simply use the current transaction, execute the callback, and immediately return (this can be useful for ensuring that a transaction has started).
+
+Once the transaction callback is completed (for non-nested transaction calls), the transaction will commit, and if the callback throws an error, the transaction will abort. However, the callback is called with the `transaction` object, which also provides the following methods and property:
+
+* `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves.
+* `abort(): void` - Aborts the current transaction and resets it.
+* `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database.
+* `timestamp: number` - This is the timestamp associated with the current transaction.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/End-of-Life.md b/site/versioned_docs/version-4.2/technical-details/release-notes/End-of-Life.md
new file mode 100644
index 00000000..ca15f713
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/End-of-Life.md
+---
+title: HarperDB Software Lifecycle Schedules
+---
+
+# HarperDB Software Lifecycle Schedules
+
+The lifecycle schedules below form a part of HarperDB’s Support Policies. They include Major Releases and Minor Releases that have reached their end-of-life date in the past 3 years.
+
+| **Release** | **Release Date** | **End of Life Date** |
+|-------------|------------------|----------------------|
+| 3.2 | 6/22 | 6/25 |
+| 3.3 | 9/22 | 9/25 |
+| 4.0 | 1/23 | 1/26 |
+| 4.1 | 4/23 | 4/26 |
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/index.md b/site/versioned_docs/version-4.2/technical-details/release-notes/index.md
new file mode 100644
index 00000000..f44555ef
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/index.md
+---
+title: Release Notes
+---
+
+# Release Notes
+
+### Current Release
+
+[Meet Tucker](../../technical-details/release-notes/v4-tucker), our 4th Release Pup
+
+[4.2.8 Tucker](./v4-tucker/4.2.8)
+
+[4.2.7 Tucker](./v4-tucker/4.2.7)
+
+[4.2.6 Tucker](./v4-tucker/4.2.6)
+
+[4.2.5 Tucker](./v4-tucker/4.2.5)
+
+[4.2.4 Tucker](./v4-tucker/4.2.4)
+
+[4.2.3 Tucker](./v4-tucker/4.2.3)
+
+[4.2.2 Tucker](./v4-tucker/4.2.2)
+
+[4.2.1 Tucker](./v4-tucker/4.2.1)
+
+[4.2.0 Tucker](./v4-tucker/4.2.0)
+
+[4.1.2 Tucker](./v4-tucker/4.1.2)
+
+[4.1.1 Tucker](./v4-tucker/4.1.1)
+
+[4.1.0 Tucker](./v4-tucker/4.1.0)
+
+[4.0.7 Tucker](./v4-tucker/4.0.7)
+
+[4.0.6 Tucker](./v4-tucker/4.0.6)
+
+[4.0.5 Tucker](./v4-tucker/4.0.5)
+
+[4.0.4 Tucker](./v4-tucker/4.0.4)
+
+[4.0.3 Tucker](./v4-tucker/4.0.3)
+
+[4.0.2 Tucker](./v4-tucker/4.0.2)
+
+[4.0.1 Tucker](./v4-tucker/4.0.1)
+
+[4.0.0 Tucker](./v4-tucker/4.0.0)
+
+### Past Releases
+
+[Meet Monkey](../../technical-details/release-notes/v3-monkey), our 3rd Release Pup
+
+[3.2.1 Monkey](./v3-monkey/3.2.1)
+
+[3.2.0 Monkey](./v3-monkey/3.2.0)
+
+[3.1.5 Monkey](./v3-monkey/3.1.5)
+
+[3.1.4 Monkey](./v3-monkey/3.1.4)
+
+[3.1.3 Monkey](./v3-monkey/3.1.3)
+
+[3.1.2 Monkey](./v3-monkey/3.1.2)
+
+[3.1.1 Monkey](./v3-monkey/3.1.1)
+
+[3.1.0 Monkey](./v3-monkey/3.1.0)
+
+[3.0.0 Monkey](./v3-monkey/3.0.0)
+
+***
+
+[Meet Penny](../../technical-details/release-notes/v2-penny), our 2nd Release Pup
+
+[2.3.1 Penny](./v2-penny/2.3.1)
+
+[2.3.0 Penny](./v2-penny/2.3.0)
+
+[2.2.3 Penny](./v2-penny/2.2.3)
+
+[2.2.2 Penny](./v2-penny/2.2.2)
+
+[2.2.0 Penny](./v2-penny/2.2.0)
+
+[2.1.1 Penny](./v2-penny/2.1.1)
+
+***
+
+[Meet Alby](../../technical-details/release-notes/v1-alby), our 1st Release Pup
+
+[1.3.1 Alby](./v1-alby/1.3.1)
+
+[1.3.0 Alby](./v1-alby/1.3.0)
+
+[1.2.0 Alby](./v1-alby/1.2.0)
+
+[1.1.0 Alby](./v1-alby/1.1.0)
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.1.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.1.0.md
new file mode 100644
index 00000000..b42514a2
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.1.0.md
+---
+title: 1.1.0
+sidebar_position: 89899
+---
+
+### HarperDB 1.1.0, Alby Release
+4/18/2018
+
+**Features**
+
+* Users & Roles:
+
+  * Limit/Assign access to all HarperDB operations
+
+  * Limit/Assign access to schemas, tables & attributes
+
+  * Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`)
+
+* Enhanced SQL parser
+
+  * Added extensive ANSI SQL Support.
+
+  * Added Array function, which allows for converting relational data into Object/Hierarchical data
+
+  * `Distinct_Array` Function: allows for removing duplicates in the Array function.
+
+  * Enhanced SQL Validation: Improved validation around the structure of SQL, validating the schema, etc.
+
+  * 10x performance improvement on SQL statements.
+
+* Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON.
+
+* Added upgrade function to CLI
+
+* Added ability to perform bulk update from CSV
+
+* Created landing page for HarperDB.
+
+* Added CORS support to HarperDB
+
+**Fixes**
+
+* Fixed memory leak in CSV bulk loads
+
+* Corrected error when attempting to perform a `SQL DELETE`
+
+* Added further validation to NoSQL `UPDATE` to validate schema & table exist
+
+* Fixed install issue where, if part of the install path did not exist, the install would silently fail.
+
+* Fixed issues with replicated data when one of the replicas is down
+
+* Removed logging of initial user’s credentials during install
+
+* Can now use reserved words as aliases in SQL
+
+* Removed user(s) password in results when calling `list_users`
+
+* Corrected forwarding of operations to other nodes in a cluster
+
+* Corrected lag in schema meta-data passing to other nodes in a cluster
+
+* Drop table & schema now move the schema or table to the trash folder under the Database folder for later permanent deletion.
+
+* Bulk inserts no longer halt the entire operation if n records already exist; instead, the return includes the hashes of records that have been skipped.
+
+* Added ability to accept EULA from command line
+
+* Corrected `search_by_value` not searching on the correct attribute
+
+* Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js
+
+* Added error handling for errors resulting from SQL calculations.
+
+* Standardized error responses as JSON.
+
+* Corrected internal process generation to not allow more processes than the machine has cores.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.2.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.2.0.md
new file mode 100644
index 00000000..095bf239
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.2.0.md
+---
+title: 1.2.0
+sidebar_position: 89799
+---
+
+### HarperDB 1.2.0, Alby Release
+7/10/2018
+
+**Features**
+
+* Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time.
+* Geo: HarperDB has implemented turf.js into its SQL parser to enable geo-based analytics.
+* Jobs: CSV data loads, exports & Time to Live now all run as background jobs.
+* Exports: Perform queries that export into JSON or CSV and save to disk or S3.
+
+
+**Fixes**
+
+* Fixed issue where CSV data loads incorrectly reported the number of records loaded.
+* Added validation to stop `BETWEEN` operations in SQL.
+* Updated logging to not include internal variables in the logs.
+* Cleaned up `add_role` response to not include internal variables.
+* Removed old and unused dependencies.
+* Built out further unit tests and integration tests.
+* Fixed HTTPS to handle certificates properly.
+* Improved stability of clustering & replication.
+* Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response.
+* Fixed issue where Blob text was not being returned from `SQL SELECT`s.
+* Fixed error being returned when querying a table with no data; now correctly returns an empty array.
+* Improved performance in SQL when searching on exact values.
+* Fixed error when `./harperdb stop` is called.
+* Fixed logging issue causing instability in installer.
+* Fixed `read_log` operation to accept date time.
+* Added permissions checking to `export_to_s3`.
+* Added ability to run SQL on `SELECT` without a `FROM`.
+* Fixed issue where updating a user’s password was not encrypting properly.
+* Fixed `user_guide.html` to point to readme on git repo.
+* Created option to have HarperDB run as a foreground process.
+* Updated `user_info` to return the correct role for a user.
+* Fixed issue where HarperDB would not stop if the database root was deleted.
+* Corrected error message on insert if an invalid schema is provided.
+* Added permissions checks for user & role operations.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.3.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.3.0.md
new file mode 100644
index 00000000..ad196159
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.3.0.md
+---
+title: 1.3.0
+sidebar_position: 89699
+---
+
+### HarperDB 1.3.0, Alby Release
+11/2/2018
+
+**Features**
+
+* Upgrade: Upgrade to newest version via command line.
+* SQL Support: Added `IS NULL` for SQL parser.
+* Added attribute validation to search operations.
+
+
+**Fixes**
+
+* Fixed `SELECT` calculations, e.g. `SELECT 2+2`.
+* Fixed `SELECT` with `OR` not returning expected results.
+* No longer allowing reserved words for schema and table names.
+* Corrected process interruptions from improper SQL statements.
+* Improved message handling between spawned processes that replace killed processes.
+* Enhanced error handling for updates to tables that do not exist.
+* Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes.
+* Fixed issue with new columns not being updated properly in update statements.
+* Now validating roles, tables and attributes when creating or updating roles.
+* Fixed an issue where in some cases `undefined` was being returned after dropping a role.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.3.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.3.1.md
new file mode 100644
index 00000000..77e3ffe4
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/1.3.1.md
+---
+title: 1.3.1
+sidebar_position: 89698
+---
+
+### HarperDB 1.3.1, Alby Release
+2/26/2019
+
+**Features**
+
+* Clustering connection direction appointment
+* Foundations for threading/multiprocessing
+* UUID autogen for hash attributes that were not provided
+* Added cluster status operation
+
+
+**Bug Fixes and Enhancements**
+
+* More logging
+* Clustering communication enhancements
+* Clustering queue ordering by timestamps
+* Cluster reconnection enhancements
+* Number of system core(s) detection
+* Node LTS (10.15) compatibility
+* Update/Alter users enhancements
+* General performance enhancements
+* Warning is logged if different versions of HarperDB are connected via clustering
+* Fixed need to restart after user creation/alteration
+* Fixed SQL error that occurred on selecting from an empty table
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/_category_.json b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/_category_.json
new file mode 100644
index 00000000..e33195ec
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/_category_.json
+{
+  "label": "HarperDB Alby (Version 1)",
+  "position": -1
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/index.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/index.md
new file mode 100644
index 00000000..ae17d022
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v1-alby/index.md
+---
+title: HarperDB Alby (Version 1)
+---
+
+# HarperDB Alby (Version 1)
+
+Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup.
+
+Here is a bit about Alby:
+
+![picture of black dog](/img/v4.2/dogs/alby.webp)
+
+_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sunbathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.1.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.1.1.md
new file mode 100644
index 00000000..e1314a5f
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.1.1.md
+---
+title: 2.1.1
+sidebar_position: 79898
+---
+
+### HarperDB 2.1.1, Penny Release
+05/22/2020
+
+**Highlights**
+
+* CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values.
+* CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to aliases in `ORDER BY` statement.
+* CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values.
+* CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas.
+* CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function.
+* CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. The same rules now apply for hash attribute values.
+* CORE-1003 Fixed handling of `ORDER BY` statements with function aliases.
+* CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name.
+* CORE-996 Fixed an issue where the `transact_to_cluster` flag was lost for CSV URL loads, and fixed an issue where new attributes created in CSV bulk loads did not sync to the cluster.
+* CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, CPU, disk, and network.
+* CORE-993 Added new custom date functions for AlaSQL & UTC updates.
+* CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting the main HarperDB process.
+* CORE-992 HTTPS enabled by default.
+* CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage.
+* CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime.
+* CORE-975 Updated time values set by SQL Date Functions to be in epoch format.
+* CORE-974 Added date functions to `SQL SELECT` column alias functionality.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.0.md
new file mode 100644
index 00000000..267168cd
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.0.md
+---
+title: 2.2.0
+sidebar_position: 79799
+---
+
+### HarperDB 2.2.0, Penny Release
+08/24/2020
+
+**Features/Updates**
+
+* CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up fewer resources
+* CORE-1018 Adds SQL functionality for `BETWEEN` statements
+* CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation
+* CORE-1036 On create/drop table we auto create/drop the related transaction environments for the schema.table
+* CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations
+* CORE-1057 Implemented write transactions in the LMDB create/update/delete functions
+* CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards
+* CORE-1059 Added config setting to disable transaction logging for an instance
+* CORE-1076 Adds permissions filter to describe operations
+* CORE-1043 Change clustering catchup to use the new transaction log
+* CORE-1052 Removed the word "master" from source
+* CORE-1061 Added new operation called `delete_transactions_before`; this will tail a transaction log for a specific schema/table
+* CORE-1040 On HarperDB startup, make sure all tables have a transaction environment
+* CORE-1055 Added 2 new settings to change the server headersTimeout & keepAliveTimeout from the config file
+* CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value`
+* CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count
+* CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL.
+* CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions`
+* CORE-1092 Do not allow the `__createdtime__` to be updated
+* CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate
+* CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found
+* CORE-1049 Updates response message for SQL update op with no matching rows
+* CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user.
+
+**Bug Fixes**
+
+* CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query
+* CORE-1037 Fixed an issue where numbers with leading zeros, e.g. 00123, were converted to numbers rather than being honored as strings.
+* CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo
+* CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped.
+* CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records.
+* CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.2.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.2.md
new file mode 100644
index 00000000..827c63db
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.2.md
+---
+title: 2.2.2
+sidebar_position: 79797
+---
+
+### HarperDB 2.2.2, Penny Release
+10/27/2020
+
+* CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
+* CORE-1153 Fixed issue where `delete_files_before` was writing to transaction log.
+* CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created.
+* CORE-1112 Adds handling for system timestamp attributes in permissions.
+* CORE-1131 Adds better handling for checking perms on operations with action value in JSON.
+* CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions.
+* CORE-1135 Adds validation for valid keys in role API operations.
+* CORE-1073 Adds new `import_from_s3` operation to API.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.3.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.3.md
new file mode 100644
index 00000000..eca953e2
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.2.3.md
+---
+title: 2.2.3
+sidebar_position: 79796
+---
+
+### HarperDB 2.2.3, Penny Release
+11/16/2020
+
+* CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause in between.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.3.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.3.0.md
new file mode 100644
index 00000000..2b248490
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.3.0.md
+---
+title: 2.3.0
+sidebar_position: 79699
+---
+
+### HarperDB 2.3.0, Penny Release
+12/03/2020
+
+**Features/Updates**
+
+* CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information)
+* CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation
+* CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings.
+* CORE-1175 Added new internal LMDB function to copy an environment for use in future features.
+* CORE-1166 Updated packages to address security vulnerabilities.
+
+**Bug Fixes**
+
+* CORE-1195 Modified `drop_attribute` to drop after data cleanse completes.
+* CORE-1149 Fix SQL bug regarding self-joins and updates AlaSQL to the 0.6.5 release.
+* CORE-1168 Fix inconsistent invalid schema/table errors.
+* CORE-1162 Fix bug where `delete_files_before` caused tables to grow in size due to an open cursor issue.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.3.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.3.1.md
new file mode 100644
index 00000000..51291a01
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/2.3.1.md
+---
+title: 2.3.1
+sidebar_position: 79698
+---
+
+### HarperDB 2.3.1, Penny Release
+1/29/2021
+
+**Bug Fixes**
+
+* CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`.
+* CORE-1219 Increased maximum table attributes from 1000 to 10000.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/_category_.json b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/_category_.json
new file mode 100644
index 00000000..285eecf7
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/_category_.json
+{
+  "label": "HarperDB Penny (Version 2)",
+  "position": -2
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/index.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/index.md
new file mode 100644
index 00000000..23bd15ec
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v2-penny/index.md
+---
+title: HarperDB Penny (Version 2)
+---
+
+# HarperDB Penny (Version 2)
+
+Did you know our release names are dedicated to employee pups? For our second release, Penny was the star.
+
+Here is a bit about Penny:
+
+![picture of brindle dog](/img/v4.2/dogs/penny.webp)
+
+_Hi, I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.0.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.0.0.md
new file mode 100644
index 00000000..2907ee6c
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.0.0.md
+---
+title: 3.0.0
+sidebar_position: 69999
+---
+
+### HarperDB 3.0, Monkey Release
+5/18/2021
+
+**Features/Updates**
+
+* CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation.
+* CORE-1304 Upgrade to Node 12.22.1.
+* CORE-1235 Adds new upgrade/install functionality.
+* CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance.
+* CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`.
+* CORE-1243 Change `HTTPS_ON` settings value to false by default.
+* CORE-1189 Implement Fastify web server, resulting in improved performance.
+* CORE-1221 Update user API to use role name instead of role id.
+* CORE-1225 Updated dependencies to eliminate npm security warnings.
+* CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality.
+
+**Bug Fixes**
+
+* CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using Node version managers, upgrading the version of Node, and then installing a new instance of HarperDB.
+* CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log.
+* CORE-1285 Update warning log when trying to create an attribute that already exists.
+* CORE-1254 Added logic to manage data collisions in clustering.
+* CORE-1212 Add pre-check to `drop_user` that returns error if user doesn't exist.
+* CORE-1114 Update response code and message from `add_user` when user already exists.
+* CORE-1111 Update response from `create_attribute` to match the create schema/table response.
+* CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.0.md
new file mode 100644
index 00000000..148690f6
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.0.md
+---
+title: 3.1.0
+sidebar_position: 69899
+---
+
+### HarperDB 3.1.0, Monkey Release
+8/24/2021
+
+**Features/Updates**
+
+* CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions
+* CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions
+* CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management
+* CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments
+* CORE-1310 Updated licensing functionality
+* CORE-1301 Updated validation for performance improvement
+* CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds
+* CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES`
+
+**Bug Fixes**
+
+* CORE-1315 Corrected issue in HarperDB restart scenario
+* CORE-1370 Update some of the validation error handlers so that they don't log the full stack
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.1.md
new file mode 100644
index 00000000..0adbeb21
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.1.md
+---
+title: 3.1.1
+sidebar_position: 69898
+---
+
+### HarperDB 3.1.1, Monkey Release
+9/23/2021
+
+**Features/Updates**
+
+* CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart
+* CORE-1395 Create a setting which allows the local Studio to be served from an instance of HarperDB
+* CORE-1397 Update the stock 404 response to not return the request URL
+* General updates to optimize Docker container
+
+**Bug Fixes**
+
+* CORE-1399 Added fixes for complex SQL alias issues
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.2.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.2.md
new file mode 100644
index 00000000..f1c192b6
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.2.md
+---
+title: 3.1.2
+sidebar_position: 69897
+---
+
+### HarperDB 3.1.2, Monkey Release
+10/21/2021
+
+**Features/Updates**
+
+* Updated the installation ASCII art to reflect the new HarperDB logo
+
+**Bug Fixes**
+
+* CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number, causing tables to behave unexpectedly
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.3.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.3.md
new file mode 100644
index 00000000..2d484f8d
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.3.md
+---
+title: 3.1.3
+sidebar_position: 69896
+---
+
+### HarperDB 3.1.3, Monkey Release
+1/14/2022
+
+**Bug Fixes**
+
+* CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.4.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.4.md
new file mode 100644
index 00000000..ae0074fd
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.4.md
+---
+title: 3.1.4
+sidebar_position: 69895
+---
+
+### HarperDB 3.1.4, Monkey Release
+2/24/2022
+
+**Features/Updates**
+
+* CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.5.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.5.md
new file mode 100644
index 00000000..eff4b5b0
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.1.5.md
+---
+title: 3.1.5
+sidebar_position: 69894
+---
+
+### HarperDB 3.1.5, Monkey Release
+3/4/2022
+
+**Features/Updates**
+
+* CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which tried to convert them to numbers but instead returned NaN.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.2.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.2.0.md
new file mode 100644
index 00000000..003575d8
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.2.0.md
+---
+title: 3.2.0
+sidebar_position: 69799
+---
+
+### HarperDB 3.2.0, Monkey Release
+3/25/2022
+
+**Features/Updates**
+
+* CORE-1391 Bug fix related to orphaned HarperDB background processes.
+* CORE-1509 Updated Node version check, updated Node.js version, updated project dependencies.
+* CORE-1518 Remove final call from logger.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.2.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.2.1.md
new file mode 100644
index 00000000..dc511a70
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.2.1.md
+---
+title: 3.2.1
+sidebar_position: 69798
+---
+
+### HarperDB 3.2.1, Monkey Release
+6/1/2022
+
+**Features/Updates**
+
+* CORE-1573 Added logic to track the pid of the foreground process if running in foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.3.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.3.0.md
new file mode 100644
index 00000000..3e3ca784
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/3.3.0.md
+---
+title: 3.3.0
+sidebar_position: 69699
+---
+
+### HarperDB 3.3.0 - Monkey
+
+* CORE-1595 Added new role type `structure_user`; this enables non-superusers to be able to create/drop schema/table/attribute.
+* CORE-1501 Improved performance for `drop_table`.
+* CORE-1599 Added two new operations for custom functions: `install_node_modules` & `audit_node_modules`.
+* CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`.
+* CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, and Python are no longer required when installing on these platforms.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/_category_.json b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/_category_.json
new file mode 100644
index 00000000..0103ac36
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/_category_.json
+{
+  "label": "HarperDB Monkey (Version 3)",
+  "position": -3
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/index.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/index.md
new file mode 100644
index 00000000..a05a2b6c
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v3-monkey/index.md
+---
+title: HarperDB Monkey (Version 3)
+---
+
+# HarperDB Monkey (Version 3)
+
+Did you know our release names are dedicated to employee pups? For our third release, we have Monkey.
+
+![picture of tan dog](/img/v4.2/dogs/monkey.webp)
+
+_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to my home. I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.0.md
new file mode 100644
index 00000000..49770307
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.0.md
+---
+title: 4.0.0
+sidebar_position: 59999
+---
+
+### HarperDB 4.0.0, Tucker Release
+11/2/2022
+
+**Networking & Data Replication (Clustering)**
+
+The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise-grade connective technology responsible for the addressing, discovery, and exchange of messages that drive common patterns in distributed systems.
+* CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them.
+* CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` code as a dependency; on install of HarperDB, download `nats-server` if possible, else fall back to building from source code.
+* CORE-1593, CORE-1761: Add `nats.js` as a project dependency.
+* CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration.
+* CORE-1467, CORE-1508: Launch and manage NATS servers with PM2.
+* CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions.
+* CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs.
+* CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS.
+* CORE-1504: Update existing hooks to save transactions to work with NATS.
+* CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now manage dynamic sourcing of table-level transaction streams between nodes and work queues.
+* CORE-1522: Create `NATSReplyService` process which handles receiving NATS-based requests from remote instances and sending back appropriate responses.
+* CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation.
+* CORE-1611: Update pre-existing transaction log operations to be audit log operations.
+* CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams.
+* CORE-1668: Update NATS serialization/deserialization to use MessagePack.
+* CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`.
+* CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart and a clustering-only restart call the NATS server with a reload directive to maintain full uptime while servers refresh.
+* CORE-1474: HarperDB install adds clustering folder structure.
+* CORE-1530: Post `drop_table`, HarperDB purges the related transaction stream.
+* CORE-1567: Set NATS config to always use TLS.
+* CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate.
+* CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`; this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB.
+* CORE-1535: Create the work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB, which are then ingested in order.
+* CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`.
+* CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery.
+* CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows but NATS may not.
+* CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets the starting time from which to synchronize transactions.
+* CORE-1785: Optimizations and bug fixes with regard to sourcing data from remote instances of HarperDB.
+* CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together.
+* CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of the routes used to connect the instance of HarperDB to the mesh.
+* CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of the routes used to connect the instance of HarperDB to the mesh.
+* CORE-1667: Fix the old environment variable `CLUSTERING_PORT` not mapping to the new hub server port.
+* CORE-1609: Allow `remove_node` to be called when the other node cannot be reached.
+* CORE-1815: Add a transaction lock to `add_node` and `update_node` to avoid a concurrent NATS source update bug.
+* CORE-1848: Update stream configs if the node name has been changed in the YAML configuration.
+* CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on the local and remote node respectively.
+
+
+**Data Storage**
+
+We have made improvements to how we store, index, and retrieve data.
+* CORE-1619: Enabled new concurrent flushing technology for improved write performance.
+* CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions.
+* CORE-1652: Encode the values of secondary indices more efficiently for faster access.
+* CORE-1670: Store the updated timestamp in `lmdb.js`'s version property.
+* CORE-1651: Enabled multiple-value indexing of array values, which allows searching on specific elements in an array more efficiently.
+* CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance.
+* Complex objects and object arrays are no longer stored in a separate index, to preserve storage and increase write throughput.
+* CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values.
+* CORE-1657: Deferred property decoding allows large objects to be stored while individual attributes can be accessed (as with `get_attributes`) without incurring the cost of decoding the entire object.
+* CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data.
+* CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates.
+* CORE-1653: Upgrade to 4.0 rebuilds tables to reflect the index improvements.
+* CORE-1753: Removed the old `node-lmdb` dependency.
+* CORE-1787: Freeze objects returned from queries.
+* CORE-1821: Read the `WRITE_ASYNC` setting, which enables LMDB nosync.
+
+**Logging**
+
+HarperDB has increased logging specificity by breaking logs out by component. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more.
+* CORE-1497: Remove the `pino` and `winston` dependencies.
+* CORE-1426: All logging is output via `stdout` and `stderr`; our default logging is then picked up by PM2, which handles writing out to file.
+* CORE-1431: Improved `read_log` operation validation.
+* CORE-1433, CORE-1463: Added log rotation.
+* CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration.
+* CORE-1436: Upgrade to 4.0 updates internals for the logging changes.
+* CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements, and improved unit tests.
+* CORE-1691: Convert non-PM2-managed log file writes to use the Node.js `fs.appendFileSync` function.
+
+**Configuration**
+
+HarperDB has updated its configuration from a properties file to YAML.
+* CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML.
+* CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file.
+* The installer has updated prompts to reflect the YAML settings.
+* CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`.
+* CORE-1461, CORE-1462, CORE-1483: Unit test improvements.
+* CORE-1492: Improvements to the `get_configuration` and `set_configuration` operations.
+* CORE-1503: Modify HarperDB configuration for more granular certificate definition.
+* CORE-1591: Update the `routes` IP param to `host` and move it to the `leaf` config in `harperdb.conf`.
+* CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on npm install.
+
+**Broad Node.js and Platform Support**
+* CORE-1624: HarperDB can now run on multiple versions of Node.js, from v14 to v19. We primarily test on v18, so that is the preferred version.
+
+**Windows 10 and 11**
+* CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL. Windows is only intended for evaluation and development purposes, not for production workloads.
+
+**Extra Changes and Bug Fixes**
+* CORE-1520: Refactor the installer to remove all waterfall code and update it to use Promises.
+* CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb.
+* CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned.
+* CORE-1626: Update the Dockerfile to accommodate the new `harperdb.conf` file.
+* CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for the CI/CD pipeline and integration tests.
+* CORE-1661: Fixed issue where an old boot properties file caused an error when attempting to install 4.0.0.
+* CORE-1697, CORE-1814, CORE-1855: Upgrade the fastify dependency to the new major version 4.
+* CORE-1629: Jobs now run as processes managed by the PM2 daemon.
+* CORE-1733: Update LICENSE to reflect the EULA on our site.
+* CORE-1606: Enable Custom Functions by default.
+* CORE-1714: Include pre-built binaries for the most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64).
+* CORE-1628: Fix issue where setting the license through an environment variable was not working.
+* CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements.
+* CORE-1706: Add support for encoding HTTP responses with MessagePack.
+* CORE-1709: Improve the way lmdb.js dependencies are installed.
+* CORE-1758: Remove/update unnecessary HTTP headers.
+* CORE-1756: On `npm install` and `harperdb install`, change the Node version check from an error to a warning if the installed Node.js version does not match our preferred version.
+* CORE-1791: Optimizations to authenticated user caching.
+* CORE-1794: Update README to discuss Windows support and Node.js versions.
+* CORE-1837: Fix issue where the Custom Functions directory was not being created on install.
+* CORE-1742: Add more validation to the audit log: check that the schema/table exists and that the log is enabled.
+* CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`.
+* CORE-1864: Fix to semver checks on upgrade.
+* CORE-1850: Fix issue where a `cluster_user` type role could not be altered.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.1.md
new file mode 100644
index 00000000..9e148e63
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.1.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.1
+sidebar_position: 59998
+---
+
+### HarperDB 4.0.1, Tucker Release
+01/20/2023
+
+**Bug Fixes**
+
+* CORE-1992 The local studio was not loading because its path got mangled in the build.
+* CORE-2001 Fixed `deploy_custom_function_project`, which was broken by a Node.js update.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.2.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.2.md
new file mode 100644
index 00000000..b65d1427
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.2.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.2
+sidebar_position: 59997
+---
+
+### HarperDB 4.0.2, Tucker Release
+01/24/2023
+
+**Bug Fixes**
+
+* CORE-2003 Fix bug where, if the machine had a single core, the thread config would default to zero.
+* Update to lmdb 2.7.3 and msgpackr 1.7.0.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.3.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.3.md
new file mode 100644
index 00000000..67aaae56
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.3.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.3
+sidebar_position: 59996
+---
+
+### HarperDB 4.0.3, Tucker Release
+01/26/2023
+
+**Bug Fixes**
+
+* CORE-2007 Add the "update nodes" 4.0.0 launch script to the build script to fix the clustering upgrade.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.4.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.4.md
new file mode 100644
index 00000000..2a30c9d1
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.4.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.4
+sidebar_position: 59995
+---
+
+### HarperDB 4.0.4, Tucker Release
+01/27/2023
+
+**Bug Fixes**
+
+* CORE-2009 Fixed bug where `add_node` was not being called when upgrading clustering.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.5.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.5.md
new file mode 100644
index 00000000..dc66721f
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.5.md
@@ -0,0 +1,14 @@
+---
+title: 4.0.5
+sidebar_position: 59994
+---
+
+### HarperDB 4.0.5, Tucker Release
+02/15/2023
+
+**Bug Fixes**
+
+* CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to the upgrade to determine whether new certificates should be created or existing certificates should be kept/used.
+* Fix the way NATS connections are honored in a local environment.
+* Do not define the certificate authority path to NATS if it is not defined in the HarperDB config.
+
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.6.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.6.md
new file mode 100644
index 00000000..bf97d148
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.6.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.6
+sidebar_position: 59993
+---
+
+### HarperDB 4.0.6, Tucker Release
+03/09/2023
+
+**Bug Fixes**
+
+* Fixed a data serialization error that occurred when a large number of different record structures were persisted in a single table.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.7.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.7.md
new file mode 100644
index 00000000..7d48666a
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.0.7.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.7
+sidebar_position: 59992
+---
+
+### HarperDB 4.0.7, Tucker Release
+03/10/2023
+
+**Bug Fixes**
+
+* Update the lmdb.js dependency.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.0.md
new file mode 100644
index 00000000..80b4e5d2
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.0.md
@@ -0,0 +1,63 @@
+---
+title: 4.1.0
+sidebar_position: 59899
+---
+
+# 4.1.0
+
+HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously, this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation, with support for optimized load tracking and session affinity, better debuggability, and a reduced memory footprint.
+
+This means debugging will be much easier for custom functions. If you install/run HarperDB locally, most modern IDEs like WebStorm and VS Code support worker thread debugging, so you can start HarperDB in your IDE, set breakpoints in your custom functions, and debug them.
+
+The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread, which can improve caching locality, performance, and fairness. This can be enabled with the `http.sessionAffinity` option in your [configuration](../../../deployments/configuration).
+
+HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory-efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in a faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent) and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and provide a means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade: all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or `for...of` loops to handle query results).
+
+4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../../deployments/configuration#storage) and [schemas configuration](../../../../deployments/configuration#schemas) for information on how to configure these locations.
+
+Logging has been revamped and condensed into one `hdb.log` file. See the logging documentation for more information.
+
+A new operation called `cluster_network` was added; this operation will ping the cluster and return a list of enmeshed nodes.
+
+Custom Functions will no longer automatically load static file routes; instead, the `@fastify/static` plugin will need to be registered with the Custom Function server. See [Host A Static Web UI](https://docs.harperdb.io/docs/v/4.1/custom-functions/host-static).
+
+Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object, it should be done in the `key` parameter. See examples [here](https://api.harperdb.io/#aa74bbdf-668c-4536-80f1-b91bb13e5024).
+
+Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3` (see the example request below):
+
+* A new attribute `region` will need to be supplied.
+* The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`.
+
+Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard Unix tooling for interacting with the process, and it is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`.
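+
+As a rough sketch of the new request shape (the host, credentials, bucket, and key below are hypothetical; see the examples linked above for the authoritative payloads), an `import_from_s3` request under the new rules looks something like this:
+
+```bash
+# Hypothetical example: note the new required "region", no trailing slash
+# on "bucket", and the nested object path expressed in "key".
+curl -X POST http://localhost:9925 \
+  -H "Content-Type: application/json" \
+  -u "HDB_ADMIN:password" \
+  -d '{
+    "operation": "import_from_s3",
+    "action": "insert",
+    "schema": "dev",
+    "table": "dog",
+    "s3": {
+      "aws_access_key_id": "YOUR_KEY_ID",
+      "aws_secret_access_key": "YOUR_SECRET",
+      "bucket": "my-bucket",
+      "key": "exports/2023/dogs.csv",
+      "region": "us-east-1"
+    }
+  }'
+```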
+
+Internal Tickets completed:
+
+* CORE-609 - Ensure that attribute names are always added to the global schema as Strings
+* CORE-1549 - Remove fastify-static code from the Custom Functions server, which auto-serves content from the "static" folder
+* CORE-1655 - Iterator-based queries
+* CORE-1764 - Fix issue where the describe\_all operation returns an empty object for non-superusers if schema(s) do not yet have table(s)
+* CORE-1854 - Switch to using worker threads instead of processes for handling concurrency
+* CORE-1877 - Extend the csv\_url\_load operation to allow for additional headers to be passed to the remote server when the CSV is being downloaded
+* CORE-1893 - Add last-updated timestamp to describe operations
+* CORE-1896 - Fix issue where `SELECT * FROM system.hdb_info` returns the wrong HDB version number after an instance upgrade
+* CORE-1904 - Fix issue when executing a GeoJSON query in SQL
+* CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams
+* CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables
+* CORE-1655 - Streaming binary format serialization
+* CORE-1943 - Add configuration option to set the mount point for audit tables
+* CORE-1921 - Update the NATS transaction lifecycle to handle message deduplication in work queue streams
+* CORE-1963 - Update logging for better readability, reduced duplication, and request context information
+* CORE-1968 - In `server/nats/natsIngestService.js`, remove the `js_msg.working();` line to improve performance
+* CORE-1976 - Fix error when calling the describe\_table operation with no schema or table defined in the payload
+* CORE-1983 - Fix issue where the create\_attribute operation does not validate the request for required attributes
+* CORE-2015 - Remove PM2 logs that get logged to the console when starting HDB
+* CORE-2048 - systemd script for 4.1
+* CORE-2052 - Include thread information in system\_information for visibility of threads
+* CORE-2061 - Add a better error message when clustering is enabled without a cluster user set
+* CORE-2068 - Create new log rotation logic since pm2 log-rotate is no longer used
+* CORE-2072 - Update to Node 18.15.0
+* CORE-2090 - Upgrade testing from v4.0.x and v3.x to v4.1
+* CORE-2091 - Run the performance tests
+* CORE-2092 - Allow for automatic patch version updates of certain packages
+* CORE-2109 - Add verify option to clustering TLS configuration
+* CORE-2111 - Update AWS SDK to v3
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.1.md
new file mode 100644
index 00000000..537ef71c
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.1.md
@@ -0,0 +1,15 @@
+---
+title: 4.1.1
+sidebar_position: 59898
+---
+
+# 4.1.1
+
+06/16/2023
+
+* HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on Node.js 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. On more memory-constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration).
+* An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration), which can provide improved replication performance in a fully connected network.
+* Improvements to our OpenShift container.
+* Dependency security updates.
+* **Bug Fixes**
+* Fixed a bug in reporting database metrics in the `system_information` operation.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.2.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.2.md
new file mode 100644
index 00000000..2a62db64
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.1.2.md
@@ -0,0 +1,13 @@
+---
+title: 4.1.2
+sidebar_position: 59897
+---
+
+### HarperDB 4.1.2, Tucker Release
+06/16/2023
+
+* HarperDB has updated binary dependencies to support older glibc versions back to 2.17.
+* A new CLI command was added to get the current status of whether HarperDB is running, along with the cluster status. This is available with `harperdb status`.
+* Improvements to our OpenShift container.
+* Dependency security updates.
+
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.0.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.0.md
new file mode 100644
index 00000000..a57a9781
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.0.md
@@ -0,0 +1,99 @@
+---
+title: 4.2.0
+sidebar_position: 59799
+---
+
+# 4.2.0
+
+#### HarperDB 4.2.0
+
+HarperDB 4.2 introduces a new interface for accessing our core database engine, with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 has also adopted a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities, including a new REST interface, advanced caching, and real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events.
+
+4.2 also introduces configurable database schemas, using GraphQL schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting started guide](../../../getting-started) to see how easy it is to get started with HarperDB apps.
+
+### Resource API
+
+The [Resource API](../../reference/resource) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB databases/tables and is designed to easily be implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real time. The [Resource API documentation details this interface](../../reference/resource).
+
+### Component Architecture
+
+HarperDB's custom functions have evolved towards a [full component architecture](../../../developers/components); our internal functionality is defined as components, and this can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a [well-defined interface for creating your own components](../../../developers/components/writing-extensions). Components can easily be deployed/installed into HarperDB using [NPM and GitHub references](../../../developers/components/installing) as well.
+
+### Configurable Database Schemas
+
+HarperDB applications or components support [schema definitions using GraphQL schema syntax](../../../../developers/applications/defining-schemas). This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with the application code.
+
+### REST Interface
+
+HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs, using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs, and more. With standards-based header support built in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers.
+
+### Real-Time
+
+HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database-style data updates and querying along with message delivery. [Real-time messaging](../../../../developers/real-time) of data is available through several protocols:
+
+#### MQTT
+
+4.2 now includes support for MQTT, a publish/subscribe messaging protocol designed for efficiency (efficient enough for even small Internet of Things devices). This allows clients to connect to HarperDB, publish messages through our data center, and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions.
+
+#### WebSockets
+
+HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling.
+
+#### Server-Sent Events
+
+HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows websites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP.
+
+### Database Structure
+
+HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally-consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are also maintained in the same database with atomic consistency.
+
+Databases are now entirely encapsulated in a file, which means they can be moved/copied elsewhere without requiring any separate metadata updates in the system tables.
+
+### Clone Node
+
+HarperDB includes new functionality for adding new HarperDB nodes to a cluster. New instances can be configured to clone from a leader node, taking and copying a database snapshot and self-configuring from that node, to facilitate accelerated deployment of new nodes for fast horizontal scaling to meet demand. [See the documentation on Clone Node for more information.](../../../../administration/cloning)
+
+### Operations API terminology updates
+
+Any operation that used the `schema` property was updated to make this property optional and alternately support `database` as the property for specifying the database (formerly 'schema'). If both `schema` and `database` are absent, the operation defaults to the `data` database. The term 'primary key' is now used in place of 'hash', and the NoSQL operation `search_by_hash` has been updated to `search_by_id` (see the example request before the Dev Mode section below).
+
+Support was added for defining a table with `primary_key` instead of `hash_attribute`.
+
+## Configuration
+
+There have been significant changes to `harperdb-config.yaml`; however, none of these changes should affect pre-4.2 versions. If you upgrade to 4.2, any existing configuration should be backwards compatible and will not need to be updated.
+
+`harperdb-config.yaml` has had some configuration values added, removed, and renamed, and some defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration) for the most current configuration parameters.
+
+* The `http` element has been expanded.
+  * `compressionThreshold` was added.
+  * All `customFunction` configuration now lives here, except for the `tls` section.
+* `threads` has moved out of the `http` element and is now its own top-level element.
+* The `authentication` section was moved out of the `operationsApi` section and is now its own top-level element/section.
+* `analytics.aggregatePeriod` was added.
+* The default logging level was changed to `warn`.
+* The default clustering log level was changed to `info`.
+* `clustering.republishMessages` now defaults to `false`.
+* `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`.
+* Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section.
+* Added a `securePort` parameter to `operationsApi` and `http`, used for setting the HTTPS port.
+* Added a new top-level `tls` section.
+* Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`.
+* Added an element called `componentRoot` which replaces `customFunctions.root`.
+* Updated custom pathing to use `databases` instead of `schemas`.
+* Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events.
+* A new `mqtt` section was added.
+
+### Socket Management
+
+HarperDB now uses socket sharing (`SO_REUSEPORT`) to distribute incoming connections to different threads. This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity-based socket delegation.
+
+HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single-port configuration.
+
+### Sessions
+
+HarperDB now supports cookie-based sessions for authentication for web clients. This can be used with the standard authentication mechanisms to log in, and then cookies can be used to preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials.
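+
+Returning to the operations API terminology updates above, here is a minimal sketch of a request using the new names (the host, credentials, table, and IDs are hypothetical):
+
+```bash
+# Hypothetical example: "database" replaces "schema", and search_by_id
+# replaces search_by_hash; with "database" omitted, the operation would
+# default to the "data" database.
+curl -X POST http://localhost:9925 \
+  -H "Content-Type: application/json" \
+  -u "HDB_ADMIN:password" \
+  -d '{
+    "operation": "search_by_id",
+    "database": "data",
+    "table": "dog",
+    "ids": [1, 2],
+    "get_attributes": ["*"]
+  }'
+```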
+
+### Dev Mode
+
+HarperDB can now directly run a HarperDB application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting with any changes in your application files. Dev mode is recommended for local application and component development.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.1.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.1.md
new file mode 100644
index 00000000..38617ca9
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.1.md
@@ -0,0 +1,13 @@
+---
+title: 4.2.1
+sidebar_position: 59798
+---
+
+### HarperDB 4.2.1, Tucker Release
+11/3/2023
+
+* Downgrade NATS 2.10.3 back to 2.10.1 due to a regression in connection handling.
+* Handle package names with underscores.
+* Improved validation of queries and comparators.
+* Avoid double replication on transactions with multiple commits.
+* Added file metadata on `get_component_file`.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.2.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.2.md
new file mode 100644
index 00000000..15768374
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.2.md
@@ -0,0 +1,15 @@
+---
+title: 4.2.2
+sidebar_position: 59797
+---
+
+### HarperDB 4.2.2, Tucker Release
+11/8/2023
+
+* Increase timeouts for NATS connections.
+* Fix for database snapshots for backups (and for clone node).
+* Fix application of permissions for default tables exposed through REST.
+* Log replication failures with record information.
+* Fix application of authorization/permissions for MQTT commands.
+* Fix copying of local components in clone node.
+* Fix calculation of overlapping start time in clone node.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.3.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.3.md
new file mode 100644
index 00000000..dab25c3d
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.3.md
@@ -0,0 +1,13 @@
+---
+title: 4.2.3
+sidebar_position: 59796
+---
+
+### HarperDB 4.2.3, Tucker Release
+11/15/2023
+
+* When setting `securePort`, disable the insecure port setting on the same port.
+* Fix `harperdb status` when the PID file is missing.
+* Fix/include missing icons/fonts from the local studio.
+* Fix a crash that could occur when concurrently accessing records larger than 16KB.
+* Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.4.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.4.md
new file mode 100644
index 00000000..87ee241d
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.4.md
@@ -0,0 +1,10 @@
+---
+title: 4.2.4
+sidebar_position: 59795
+---
+
+### HarperDB 4.2.4, Tucker Release
+11/16/2023
+
+* Prevent coercion of strings to numbers in SQL queries (in the WHERE clause).
+* Address the fastify deprecation warning about accessing config.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.5.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.5.md
new file mode 100644
index 00000000..1172c4b3
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.5.md
@@ -0,0 +1,12 @@
+---
+title: 4.2.5
+sidebar_position: 59794
+---
+
+### HarperDB 4.2.5, Tucker Release
+11/22/2023
+
+* Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery).
+* Update the geoNear function to tolerate null values.
+* lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved.
+* Add a header to indicate the start of a new authenticated session (for the studio to identify authenticated sessions).
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.6.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.6.md
new file mode 100644
index 00000000..d0a1f177
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.6.md
@@ -0,0 +1,10 @@
+---
+title: 4.2.6
+sidebar_position: 59793
+---
+
+### HarperDB 4.2.6, Tucker Release
+11/29/2023
+
+* Update various geo SQL functions to tolerate invalid values.
+* Properly report component installation/load errors in `get_components` (for the studio to load components after an installation failure).
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.7.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.7.md
new file mode 100644
index 00000000..78bfcaa7
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.7.md
@@ -0,0 +1,11 @@
+---
+title: 4.2.7
+sidebar_position: 59792
+---
+
+### HarperDB 4.2.7
+12/6/2023
+
+* Add support for cloning over the top of an existing HarperDB instance.
+* Add health checks for the NATS consumer, with the ability to restart consumer loops for better resiliency.
+* Revert the Fastify autoload module due to a regression that had caused ECMAScript modules for Fastify route modules to fail to load on Windows.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.8.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.8.md
new file mode 100644
index 00000000..fbe94b69
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/4.2.8.md
@@ -0,0 +1,14 @@
+---
+title: 4.2.8
+sidebar_position: 59791
+---
+
+### HarperDB 4.2.8
+12/19/2023
+
+* Added support for CLI command-line arguments for clone node.
+* Added support for cloning a node without enabling clustering.
+* Clear the NATS client cache on the closed event.
+* Fix the check for attribute permissions so that an empty attribute permissions array is treated as a table-level permission definition.
+* Improve the speed of cross-node health checks.
+* Fix for using `database` in describe operations.
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/_category_.json b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/_category_.json
new file mode 100644
index 00000000..9a7bca50
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Tucker (Version 4)",
+  "position": -4
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/index.md b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/index.md
new file mode 100644
index 00000000..f2f48c98
--- /dev/null
+++ b/site/versioned_docs/version-4.2/technical-details/release-notes/v4-tucker/index.md
@@ -0,0 +1,11 @@
+---
+title: HarperDB Tucker (Version 4)
+---
+
+# HarperDB Tucker (Version 4)
+
+Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker.
+
+![picture of grey and white dog](/img/v4.2/dogs/tucker.png)
+
+_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at HarperDB. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard. I have very ticklish feet and love belly rubs!_
diff --git a/site/versioned_docs/version-4.3/administration/_category_.json b/site/versioned_docs/version-4.3/administration/_category_.json
new file mode 100644
index 00000000..828e0998
--- /dev/null
+++ b/site/versioned_docs/version-4.3/administration/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Administration",
+  "position": 2,
+  "link": {
+    "type": "generated-index",
+    "title": "Administration Documentation",
+    "description": "Guides for managing and administering HarperDB instances",
+    "keywords": [
+      "administration"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/administration/administration.md b/site/versioned_docs/version-4.3/administration/administration.md
new file mode 100644
index 00000000..8fbe7b80
--- /dev/null
+++ b/site/versioned_docs/version-4.3/administration/administration.md
@@ -0,0 +1,31 @@
+---
+title: Best Practices and Recommendations
+---
+
+# Best Practices and Recommendations
+
+HarperDB is designed for minimal administrative effort, and with managed services this is handled for you. But there are important things to consider when managing your own HarperDB servers.
+
+### Data Protection, Backup, and Recovery
+
+As a distributed database, HarperDB can benefit from different data protection and recovery strategies than a traditional single-server database. Multiple aspects of data protection and recovery should be considered:
+
+* Availability: As a distributed database, HarperDB is intrinsically built for high availability, and a cluster will continue to run even if entire servers fail. This is the first and primary defense against any downtime or data loss. HarperDB provides fast horizontal scaling functionality with node cloning, which facilitates the establishment of high-availability clusters.
+* [Audit log](./logging/audit-logging): HarperDB defaults to tracking data changes, so malicious data changes can be found, attributed, and reverted. This provides a security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches.
+* Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). HarperDB has a [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot (see the example at the end of this page). Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. HarperDB can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss).
+
+### Horizontal Scaling with Node Cloning
+
+HarperDB provides rapid horizontal scaling capabilities through [node cloning functionality described here](./cloning).
+
+### Monitoring
+
+HarperDB provides robust capabilities for analytics and observability to facilitate effective and informative monitoring:
+* Analytics provides statistics on usage, request counts, load, and memory usage, with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics).
+* A large variety of real-time statistics about load, system information, database metrics, and thread usage can be retrieved through the [`system_information` API](../developers/operations-api/utilities).
+* Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering).
+* Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy to visualize and monitor HarperDB with Grafana.
+
+### Replication Transaction Logging
+
+HarperDB utilizes NATS for replication, which maintains a transaction log. See the [transaction log documentation for information on how to query this log](./logging/transaction-logging).
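+
+As a rough sketch of the snapshot request mentioned above (the host, credentials, and output path are hypothetical; see the [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) documentation for the authoritative parameters), a backup of the default `data` database could be retrieved like this:
+
+```bash
+# Hypothetical example: request a snapshot of the "data" database and
+# write the returned database file to a local backup.
+curl -X POST http://localhost:9925 \
+  -H "Content-Type: application/json" \
+  -u "HDB_ADMIN:password" \
+  -d '{ "operation": "get_backup", "database": "data" }' \
+  -o data-snapshot.mdb
+```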
diff --git a/site/versioned_docs/version-4.3/administration/cloning.md b/site/versioned_docs/version-4.3/administration/cloning.md
new file mode 100644
index 00000000..ed4e1d79
--- /dev/null
+++ b/site/versioned_docs/version-4.3/administration/cloning.md
@@ -0,0 +1,171 @@
+---
+title: Clone Node
+---
+
+# Clone Node
+
+Clone node is a configurable node script that, when pointed at another instance of HarperDB, will create a clone of that
+instance's config and databases and set up replication. If it is run in a location where there is no existing HarperDB install,
+it will, along with cloning, install HarperDB. If it is run in a location where there is another HarperDB instance, it will
+only clone the config, databases, and replication that do not already exist.
+
+Clone node is triggered when HarperDB is installed or started with certain environment or command line (CLI) variables set (see below).
+
+**Leader node** - the instance of HarperDB you are cloning.\
+**Clone node** - the new node which will be a clone of the leader node.
+
+To start a clone, run `harperdb` in the CLI with either of the following sets of variables:
+
+#### Environment variables
+
+* `HDB_LEADER_URL` - The URL of the leader node's operations API (usually port 9925).
+* `HDB_LEADER_USERNAME` - The leader node admin username.
+* `HDB_LEADER_PASSWORD` - The leader node admin password.
+* `HDB_LEADER_CLUSTERING_HOST` - _(optional)_ The leader clustering host. This value will be added to the clustering routes on the clone node. If this value is not set, replication will not be set up between the leader and clone.
+
+For example:
+```
+HDB_LEADER_URL=https://node-1.my-domain.com:9925 HDB_LEADER_CLUSTERING_HOST=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb
+```
+
+#### Command line variables
+
+* `--HDB_LEADER_URL` - The URL of the leader node's operations API (usually port 9925).
+* `--HDB_LEADER_USERNAME` - The leader node admin username.
+* `--HDB_LEADER_PASSWORD` - The leader node admin password.
+* `--HDB_LEADER_CLUSTERING_HOST` - _(optional)_ The leader clustering host. This value will be added to the clustering routes on the clone node. If this value is not set, replication will not be set up between the leader and clone.
+
+For example:
+```
+harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --HDB_LEADER_CLUSTERING_HOST node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ...
+```
+
+Each time clone is run, it will set the value `cloned: true` in `harperdb-config.yaml`. This value will prevent clone from
+running again. If you want to run clone again, set this value to `false`. If HarperDB is started with the clone variables
+still present and `cloned` is true, HarperDB will just start as normal.
+
+Clone node does not require any additional configuration apart from the variables referenced above.
+However, if you wish to set any configuration during clone, this can be done by passing the config as environment/CLI
+variables or by cloning over the top of an existing `harperdb-config.yaml` file.
+
+More can be found in the HarperDB config documentation [here](../deployments/configuration).
+
+_Note: because the node name must be unique, clone will auto-generate one unless one is provided._
+
+### Excluding databases, components and replication
+
+To set any specific (optional) clone config, including the exclusion of any databases, components, or replication, there is a file
+called `clone-node-config.yaml` that can be used.
+
+The file must be located in the `ROOTPATH` directory of your clone (the `hdb` directory where your clone will be installed;
+if the directory does not exist, create it and add the file to it).
+
+The config available in `clone-node-config.yaml` is:
+
+```yaml
+databaseConfig:
+  excludeDatabases:
+    - database: null
+  excludeTables:
+    - database: null
+      table: null
+componentConfig:
+  exclude:
+    - name: null
+clusteringConfig:
+  publishToLeaderNode: true
+  subscribeToLeaderNode: true
+  excludeDatabases:
+    - database: null
+  excludeTables:
+    - database: null
+      table: null
+```
+
+_Note: only include the configuration that you are using. If no clone config file is provided, nothing will be excluded
+unless it already exists on the clone._
+
+`databaseConfig` - Set any databases or tables that you wish to exclude from cloning.
+
+`componentConfig` - Set any components that you do not want cloned. Clone node will not clone the component code;
+it will only clone the component reference that exists in the leader's `harperdb-config.yaml` file.
+
+`clusteringConfig` - Set the replication setup to establish with the other nodes (the default is `true` for both) and
+set any databases or tables that you wish to exclude from clustering.
+
+### Cloning configuration
+
+Clone node will not clone any configuration that is classed as unique to the leader node. This includes `clustering.nodeName`,
+`rootPath` and any other path-related values (for example `storage.path`, `logging.root`, `componentsRoot`, and
+any authentication certificate/key paths).
+
+**Clustering Routes**
+
+By default, the clone will send a set-routes request to the leader node. The default `host` used in this request will be the
+hostname of the clone's operating system.
+
+To manually set a host, use the variable `HDB_CLONE_CLUSTERING_HOST`.
+
+To disable the setting of the route, set `HDB_SET_CLUSTERING_HOST` to `false`.
+
+### Cloning the system database
+
+HarperDB uses a database called `system` to store operational information. Clone node will only clone the user and role
+tables from this database. It will also set up replication on these tables, which means that any existing and future users and roles
+that are added will be replicated throughout the cluster.
+
+Cloning the user and role tables means that once clone node is complete, the clone will share the same login credentials as
+the leader.
+
+### Fully connected clone
+
+A fully connected topology is when all nodes are replicating (publishing and subscribing) with all other nodes.
+A fully connected clone maintains this topology with the addition of the new node. When a clone is created,
+replication is added between the leader and the clone and any nodes the leader is replicating with. For example,
+if the leader is replicating with node-a and node-b, the clone will replicate with the leader, node-a, and node-b.
+
+To run clone node with the fully connected option, simply pass the environment variable `HDB_FULLY_CONNECTED=true` or the CLI variable `--HDB_FULLY_CONNECTED true`.
+
+### Cloning over the top of an existing HarperDB instance
+
+Clone node will not overwrite any existing config, database, or replication. It will write/clone any config, database, or replication
+that does not exist on the node it is running on.
+
+An example of how this can be useful is if you want to set HarperDB config before the clone is created. To do this you
+would create a `harperdb-config.yaml` file in your local `hdb` root directory with the config you wish to set.
+Then, when clone is run, it will append the missing config to the file and install HarperDB with the desired config.
+
+Another useful example could be retroactively adding another database to an existing instance. Running clone on
+an existing instance could create a full clone of another database and set up replication between the database on the
+leader and the clone.
+
+### Cloning steps
+
+Clone node will execute the following steps when run:
+1. Look for an existing HarperDB install. It does this by using the default (or user-provided) `ROOTPATH`.
+1. If an existing instance is found, check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true`, skip the clone logic and start HarperDB.
+1. Clone the `harperdb-config.yaml` values that don't already exist (excluding values unique to the leader node).
+1. Fully clone any databases that don't already exist.
+1. If classed as a "fresh clone", install HarperDB. An instance is classed as a fresh clone if there is no system database.
+1. If clustering is enabled on the leader and the `HDB_LEADER_CLUSTERING_HOST` variable is provided, set up replication on all cloned database(s).
+1. Clone is complete; start HarperDB.
+
+### Cloning with Docker
+
+To run clone inside a container, add the environment variables to your run command.
+
+For example:
+
+```
+docker run -d \
+  -v <host_directory>:/home/harperdb/hdb \
+  -e HDB_LEADER_PASSWORD=password \
+  -e HDB_LEADER_USERNAME=admin \
+  -e HDB_LEADER_URL=https://1.123.45.6:9925 \
+  -e HDB_LEADER_CLUSTERING_HOST=1.123.45.6 \
+  -p 9925:9925 \
+  -p 9926:9926 \
+  harperdb/harperdb
+```
+
+Clone will only run once, when you first start the container. If the container restarts, the environment variables will be ignored.
diff --git a/site/versioned_docs/version-4.3/administration/compact.md b/site/versioned_docs/version-4.3/administration/compact.md
new file mode 100644
index 00000000..ca2aaf57
--- /dev/null
+++ b/site/versioned_docs/version-4.3/administration/compact.md
@@ -0,0 +1,65 @@
+---
+title: Compact a database
+---
+
+# Compact a database
+
+Database files can grow quickly as you use them, sometimes impeding performance.
+HarperDB has multiple compact features that can be used to reduce database file size and potentially improve performance.
+The compact process does not compress your data; instead, it makes your database file smaller by eliminating free space and fragmentation.
+
+There are two options that HarperDB offers for compacting a database.
+
+_Note: Some of the storage configuration (such as compression) cannot be updated on existing databases;
+this is where the following options are useful. They will create a new copy of the database with any updated configuration applied._
+
+More information on the storage configuration options can be [found here](../deployments/configuration#storage).
+
+### Copy compaction
+
+It is recommended that, to prevent any record loss, HarperDB is not running when performing this operation.
+
+This will copy a HarperDB database with compaction. If you wish to use this new database in place of the original,
+you will need to move/rename it to the path of the original database.
+
+This command should be run in the [CLI](../deployments/harperdb-cli):
+
+```bash
+harperdb copy-db <database_name> <destination_path>
+```
+For example, to copy the default database:
+```bash
+harperdb copy-db data /home/user/hdb/database/copy.mdb
+```
+
+### Compact on start
+
+Compact on start is a more automated option that will compact __all__ databases when HarperDB is started. HarperDB will
+not start until compact is complete. Under the hood, it loops through all non-system databases,
+creates a backup of each one, and calls copy-db. After the copy/compaction is complete, it will move the new database
+to where the original one is located and remove any backups.
+
+Compact on start is initiated by config in `harperdb-config.yaml`.
+
+_Note: Compact on start will switch `compactOnStart` to `false` after it has run._
+
+`compactOnStart` - _Type_: boolean; _Default_: false
+
+`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false
+
+```yaml
+storage:
+  compactOnStart: true
+  compactOnStartKeepBackup: false
+```
+
+Using CLI variables:
+
+```bash
+--STORAGE_COMPACTONSTART true --STORAGE_COMPACTONSTARTKEEPBACKUP true
+```
+
+Using environment variables:
+
+```bash
+STORAGE_COMPACTONSTART=true
+STORAGE_COMPACTONSTARTKEEPBACKUP=true
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/create-account.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/create-account.md
new file mode 100644
index 00000000..635de7f4
--- /dev/null
+++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/create-account.md
@@ -0,0 +1,26 @@
+---
+title: Create a Studio Account
+---
+
+# Create a Studio Account
+Start at the [HarperDB Studio sign-up page](https://studio.harperdb.io/sign-up).
+
+1) Provide the following information:
+   * First Name
+   * Last Name
+   * Email Address
+   * Subdomain
+
+     *Part of the URL that will be used to identify your HarperDB Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.*
+   * Coupon Code (optional)
+2) Review the Privacy Policy and Terms of Service.
+3) Click the sign up for free button.
+4) You will be taken to a new screen to add an account password. Enter your password.
+
+   *Passwords must be a minimum of 8 characters with at least 1 lowercase character, 1 uppercase character, 1 number, and 1 special character.*
+5) Click the add account password button.
+
+You will receive a Studio welcome email confirming your registration.
+
+Note: Your email address will be used as your username and cannot be changed.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/enable-mixed-content.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/enable-mixed-content.md
new file mode 100644
index 00000000..1948d6be
--- /dev/null
+++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/enable-mixed-content.md
@@ -0,0 +1,11 @@
+---
+title: Enable Mixed Content
+---
+
+# Enable Mixed Content
+
+Enabling mixed content is required in cases where you would like to connect the HarperDB Studio to HarperDB instances via HTTP. This should not be used for production systems, but it may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio.
+
+A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html).
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/index.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/index.md new file mode 100644 index 00000000..8765927c --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/index.md @@ -0,0 +1,17 @@

---
title: HarperDB Studio
---

# HarperDB Studio

HarperDB Studio is the web-based GUI for HarperDB. Studio enables you to administer, navigate, and monitor all of your HarperDB instances in a simple, user-friendly interface without any knowledge of the underlying HarperDB API. It’s free to sign up, so get started today!

[Sign up for free!](https://studio.harperdb.io/sign-up)

HarperDB now includes a simplified local Studio that is packaged with all HarperDB installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io).

---

## How does Studio Work?

While HarperDB Studio is web based and hosted by us, all database interactions are performed on the HarperDB instance the Studio is connected to. The HarperDB Studio loads in your browser, at which point you log in to your HarperDB instances. Credentials are stored in your browser cache and are not transmitted back to HarperDB. All database interactions are made via the HarperDB Operations API directly from your browser to your instance.

## What type of instances can I manage?

HarperDB Studio enables users to manage both HarperDB Cloud instances and privately hosted instances all from a single UI. All HarperDB instances feature identical behavior whether they are hosted by us or by you.

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/instance-configuration.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/instance-configuration.md new file mode 100644 index 00000000..ec800055 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/instance-configuration.md @@ -0,0 +1,125 @@

---
title: Instance Configuration
---

# Instance Configuration

HarperDB instance configuration can be viewed and managed directly through the HarperDB Studio. HarperDB Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. Enterprise instances can have their licenses modified by adjusting licensed RAM.

All instance configuration is handled through the **config** page of the HarperDB Studio, accessed with the following instructions:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **config** in the instance control bar.
*Note, the **config** page will only be available to super users, and certain items are restricted to Studio organization owners.*

## Instance Overview

The **instance overview** panel displays the following instance specifications:

* Instance URL
* Applications URL
* Instance Node Name (for clustering)
* Instance API Auth Header (this user)

  *The Basic authentication header used for the logged-in HarperDB database user.*
* Created Date (HarperDB Cloud only)
* Region (HarperDB Cloud only)

  *The geographic region where the instance is hosted.*
* Total Price
* RAM
* Storage (HarperDB Cloud only)
* Disk IOPS (HarperDB Cloud only)

## Update Instance RAM

HarperDB Cloud instance size and Enterprise instance licenses can be modified with the following instructions. This option is only available to Studio organization owners.

Note: For HarperDB Cloud instances, upgrading RAM may add additional CPUs to your instance as well. Click here to see how many CPUs are provisioned for each instance size.

1) In the **update ram** panel at the bottom left:
   * Select the new instance size.
   * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade.
   * If you do have a credit card associated, you will be presented with the updated billing information.
   * Click **Upgrade**.
2) The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.

*Note, if HarperDB Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.*

## Update Instance Storage

The HarperDB Cloud instance storage size can be increased with the following instructions. This option is only available to Studio organization owners.

Note: Instance storage can only be upgraded once every 6 hours.

1) In the **update storage** panel at the bottom left:
   * Select the new instance storage size.
   * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade.
   * If you do have a credit card associated, you will be presented with the updated billing information.
   * Click **Upgrade**.
2) The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE.
3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size.

*Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.*

## Remove Instance

The HarperDB instance can be deleted/removed from the Studio with the following instructions.
Once this operation is started it cannot be undone. This option is only available to Studio organization owners.

1) In the **remove instance** panel at the bottom left:
   * Enter the instance name in the text box.
   * The Studio will present you with a warning.
   * Click **Remove**.
2) The instance will begin deleting immediately.

## Restart Instance

The HarperDB Cloud instance can be restarted with the following instructions.

1) In the **restart instance** panel at the bottom right:
   * Enter the instance name in the text box.
   * The Studio will present you with a warning.
   * Click **Restart**.
2) The instance will begin restarting immediately.

## Instance Config (Read Only)

A JSON preview of the instance config is available for reference at the bottom of the page. This is a read-only view and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration#using-the-configuration-file-and-naming-conventions).

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/instance-metrics.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/instance-metrics.md new file mode 100644 index 00000000..f084df63 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/instance-metrics.md @@ -0,0 +1,16 @@

---
title: Instance Metrics
---

# Instance Metrics

The HarperDB Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions:

1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
1. Click the appropriate organization that the instance belongs to.
1. Select your desired instance.
1. Click **status** in the instance control bar.

Once on the instance status page you can view host system information, [HarperDB logs](../logging/standard-logging), and [HarperDB Cloud alarms](../../deployments/harperdb-cloud/alarms) (if it is a cloud instance).

_Note, the **status** page will only be available to super users._

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/instances.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/instances.md new file mode 100644 index 00000000..548deb5a --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/instances.md @@ -0,0 +1,131 @@

---
title: Instances
---

# Instances

The HarperDB Studio allows you to administer all of your HarperDB instances in one place. HarperDB currently offers the following instance types:

* **HarperDB Cloud Instance** Managed installations of HarperDB, what we call [HarperDB Cloud](../../deployments/harperdb-cloud/).
* **5G Wavelength Instance** Managed installations of HarperDB running on the Verizon network through AWS Wavelength, what we call [5G Wavelength Instances](../../deployments/harperdb-cloud/verizon-5g-wavelength-instances). _Note, these instances are only accessible via the Verizon network._
* **Enterprise Instance** Any HarperDB installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally.

All interactions between the Studio and your instances take place directly from your browser.
HarperDB stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the HarperDB instances using the standard [HarperDB API](../../developers/operations-api/).

## Organization Instance List

A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets its own card. HarperDB Cloud and Enterprise instances are listed together.

## Create a New Instance

1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
1. Click the appropriate organization for the instance to be created under.
1. Click the **Create New HarperDB Cloud Instance + Register Enterprise Instance** card.
1. Select your desired Instance Type.
1. For a HarperDB Cloud Instance or a HarperDB 5G Wavelength Instance, click **Create HarperDB Cloud Instance**.
   1. Fill out Instance Info.
      1. Enter Instance Name

         _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._
      1. Enter Instance Username

         _This is the username of the initial HarperDB instance super user._
      1. Enter Instance Password

         _This is the password of the initial HarperDB instance super user._
      1. Click **Instance Details** to move to the next page.
   1. Select Instance Specs
      1. Select Instance RAM

         _HarperDB Cloud Instances are billed based on Instance RAM; this will select the size of your provisioned instance._ [_More on instance specs_](../../deployments/harperdb-cloud/instance-size-hardware-specs)_._
      1. Select Storage Size

         _Each instance has a mounted storage volume where your HarperDB data will reside. Storage is provisioned based on space and IOPS._ [_More on IOPS Impact on Performance_](../../deployments/harperdb-cloud/iops-impact)_._
      1. Select Instance Region

         _The geographic area where your instance will be provisioned._
      1. Click **Confirm Instance Details** to move to the next page.
   1. Review your Instance Details; if there is an error, use the back button to correct it.
   1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm.
   1. Click **Add Instance**.
   1. Your HarperDB Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready.

## Register Enterprise Instance

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
2) Click the appropriate organization for the instance to be created under.
3) Click the **Create New HarperDB Cloud Instance + Register Enterprise Instance** card.
4) Select **Register Enterprise Instance**.
   1. Fill out Instance Info.
      1. Enter Instance Name

         _This is used for descriptive purposes only._
      1. Enter Instance Username

         _The username of a HarperDB super user that is already configured in your HarperDB installation._
      1. Enter Instance Password

         _The password of a HarperDB super user that is already configured in your HarperDB installation._
      1. Enter Host

         _The host to access the HarperDB instance.
For example, `harperdb.myhost.com` or `localhost`._
      1. Enter Port

         _The port to access the HarperDB instance. HarperDB defaults to `9925` for HTTP and `31283` for HTTPS._
      1. Select SSL

         _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._
      1. Click **Instance Details** to move to the next page.
   1. Select Instance Specs
      1. Select Instance RAM

         _HarperDB instances are billed based on Instance RAM. Selecting additional RAM enables faster and more complex queries._
      1. Click **Confirm Instance Details** to move to the next page.
   1. Review your Instance Details; if there is an error, use the back button to correct it.
   1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm.
   1. Click **Add Instance**.
   1. The HarperDB Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete.

## Delete an Instance

Instance deletion has two different behaviors depending on the instance type.

* **HarperDB Cloud Instance** This instance will be permanently deleted, including all data. This process is irreversible and cannot be undone.
* **Enterprise Instance** The instance will be removed from the HarperDB Studio only. This does not uninstall HarperDB from your system and your data will remain intact.

An instance can be deleted as follows:

1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
1. Click the appropriate organization that the instance belongs to.
1. Identify the proper instance card and click the trash can icon.
1. Enter the instance name into the text box.

   _This is done for confirmation purposes to ensure you do not accidentally delete an instance._
1. Click the **Do It** button.

## Upgrade an Instance

HarperDB instances can be resized on the [Instance Configuration](./instance-configuration) page.

## Instance Log In/Log Out

The Studio enables users to log in and out of different database users from the instance control panel. To log out of an instance:

1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
1. Click the appropriate organization that the instance belongs to.
1. Identify the proper instance card and click the lock icon.
1. You will immediately be logged out of the instance.

To log in to an instance:

1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
1. Click the appropriate organization that the instance belongs to.
1. Identify the proper instance card (it will have an unlocked icon and a status reading PLEASE LOG IN) and click the center of the card.
1. Enter the database username.

   _The username of a HarperDB user that is already configured in your HarperDB instance._
1. Enter the database password.

   _The password of a HarperDB user that is already configured in your HarperDB instance._
1. Click **Log In**.
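If a login attempt fails, the same credentials can be checked directly against the instance's Operations API from a terminal. A minimal sketch with placeholder credentials, assuming the default operations port `9925` (adjust scheme, host, and port to match your instance):

```bash
# describe_all lists the databases/tables this user can see;
# any 2xx response confirms the host, port, and credentials are valid.
curl -X POST https://localhost:9925 \
  -H 'Content-Type: application/json' \
  -u 'HDB_ADMIN:password' \
  -d '{"operation": "describe_all"}'
```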
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/login-password-reset.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/login-password-reset.md new file mode 100644 index 00000000..dddda5c1 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/login-password-reset.md @@ -0,0 +1,42 @@

---
title: Login and Password Reset
---

# Login and Password Reset

## Log In to Your HarperDB Studio Account

To log into your existing HarperDB Studio account:

1) Navigate to the [HarperDB Studio](https://studio.harperdb.io/).
2) Enter your email address.
3) Enter your password.
4) Click **sign in**.

## Reset a Forgotten Password

To reset a forgotten password:

1) Navigate to the HarperDB Studio password reset page.
2) Enter your email address.
3) Click **send password reset email**.
4) If the account exists, you will receive an email with a temporary password.
5) Navigate back to the HarperDB Studio login page.
6) Enter your email address.
7) Enter your temporary password.
8) Click **sign in**.
9) You will be taken to a new screen to reset your account password. Enter your new password.

   *Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.*
10) Click the **add account password** button.

## Change Your Password

If you are already logged into the Studio, you can change your password through the user interface.

1) Navigate to the HarperDB Studio profile page.
2) In the **password** section, enter:
   * Current password.
   * New password.
   * New password again *(for verification)*.
3) Click the **Update Password** button.

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-applications.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-applications.md new file mode 100644 index 00000000..57126c96 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-applications.md @@ -0,0 +1,61 @@

---
title: Manage Applications
---

# Manage Applications

[HarperDB Applications](../../developers/applications/) are enabled by default and can be configured further through the HarperDB Studio. It is recommended to read through the [Applications](../../developers/applications/) documentation first to gain a strong understanding of HarperDB Applications behavior.

All Applications configuration and development is handled through the **applications** page of the HarperDB Studio, accessed with the following instructions:

1) Navigate to the HarperDB Studio Organizations page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **applications** in the instance control bar.

*Note, the **applications** page will only be available to super users.*

## Manage Applications

The Applications editor is not required for development and deployment, though it is a useful tool to maintain and manage your HarperDB Applications. The editor provides the ability to create new applications or import/deploy remote application packages.

The left bar is the applications file navigator, allowing you to select files to edit and add/remove files and folders. By default, this view is empty because there are no existing applications. To get started, either create a new application or import/deploy a remote application.
The right side of the screen is the file editor. Here you can edit individual files of your application directly in the HarperDB Studio.

## Things to Keep in Mind

To learn more about developing HarperDB Applications, make sure to read through the [Applications](../../developers/applications/) documentation.

When working with Applications in the HarperDB Studio, by default the editor will restart the HarperDB Applications server every time a file is saved. Note, this behavior can be turned off via the **auto** toggle at the top right of the applications page. If you are constantly editing your application, it may result in errors causing the application not to run. These errors will not be visible on the application page; however, they will be available in the HarperDB logs, which can be found on the [status page](./instance-metrics).

The Applications editor stores unsaved changes in cache. This means that occasionally your editor will show a discrepancy from the code that is stored and running on your HarperDB instance. You can identify if the code in your Studio differs if the "save" and "revert" buttons are active. To revert the cached version in your editor to the version of the file stored on your HarperDB instance, click the "revert" button.

## Accessing Your Application Endpoints

Accessing your application endpoints varies with which type of endpoint you're creating. All endpoints, regardless of type, will be accessed via the [HarperDB HTTP port found in the HarperDB configuration file](../../deployments/configuration#http). The default port is `9926`, but you can verify what your instance is set to by navigating to the [instance config page](./instance-configuration) and examining the read-only JSON version of your instance's config file, looking specifically for either the `http: port: 9926` or `http: securePort: 9926` configs. If `port` is set, you will access your endpoints via `http`, and if `securePort` is set, you will access your endpoints via `https`.

Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`.

- **Standard REST Endpoints**\
Standard REST endpoints are defined via the `@export` directive to tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest), we could send a `GET` (or since this is a GET, we could post the URL in our browser) to `https://localhost:9926/Dog/1`.
- **Augmented REST Endpoints**\
HarperDB Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`.
- **Fastify Routes**\
If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [HarperDB Application Template](https://github.com/HarperDB/application-template/blob/main/routes/index.js), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost:9926/application-template/getAll`.
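Putting those three patterns side by side, the examples above could be exercised from a terminal as follows (a sketch that assumes the `Dog`/`DogWithHumanAge` resources and the `application-template` project described above exist on your instance):

```bash
# Standard REST endpoint exported from the schema
curl https://localhost:9926/Dog/1

# Augmented REST endpoint backed by a custom JavaScript resource
curl https://localhost:9926/DogWithHumanAge/1

# Fastify route defined by the application-template project
curl https://localhost:9926/application-template/getAll
```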
## Creating a New Application

1) From the application page, click the "+ app" button at the top right.
2) Click "+ Create A New Application Using The Default Template".
3) Enter a name for your project; note, project names must contain only alphanumeric characters, dashes, and underscores.
4) Click OK.
5) Your project will be available in the applications file navigator on the left. Click a file to select it for editing.

## Editing an Application

1) From the applications page, click the file you would like to edit from the file navigator on the left.
2) Edit the file with any changes you'd like.
3) Click "save" at the top right. Note, as mentioned above, when you save a file, the HarperDB Applications server will be restarted immediately.

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-charts.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-charts.md new file mode 100644 index 00000000..cb73ae99 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-charts.md @@ -0,0 +1,65 @@

---
title: Manage Charts
---

# Manage Charts

The HarperDB Studio includes a charting feature within an instance. Charts are generated in real time based on your existing data and automatically refreshed every 15 seconds. Instance charts can be accessed with the following instructions:

1. Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
1. Click the appropriate organization that the instance belongs to.
1. Select your desired instance.
1. Click **charts** in the instance control bar.

## Creating a New Chart

Charts are generated based on SQL queries; therefore, to build a new chart you first need to build a query. Instructions as follows (starting on the charts page described above):

1. Click **query** in the instance control bar.
1. Enter the SQL query you would like to generate a chart from.

   _For example, using the dog demo data from the API Docs, we can get the average dog age per owner with the following query: `SELECT AVG(age) as avg_age, owner_name FROM dev.dog GROUP BY owner_name`._
1. Click **Execute**.
1. Click **create chart** at the top right of the results table.
1. Configure your chart.
   1. Choose chart type.

      _HarperDB Studio offers many standard charting options like line, bar, etc._
   1. Choose a data column.

      _This column will be used to plot the data point. Typically, this is the values being calculated in the `SELECT` statement. Depending on the chart type, you can select multiple data columns to display on a single chart._
   1. Depending on the chart type, you will need to select a grouping.
      _This could be labeled as x-axis, label, etc. This will be used to group the data; typically this is what you used in your **GROUP BY** clause._
   1. Enter a chart name.

      _Used for identification purposes and will be displayed at the top of the chart._
   1. Choose visible to all org users toggle.

      _Leaving this option off will limit chart visibility to just your HarperDB Studio user. Toggling it on will enable all users within this Organization to view this chart._
   1. Click **Add Chart**.
   1. The chart will now be visible on the **charts** page.

The example query above, configured as a bar chart, results in the following chart:

![Average Age per Owner Example](/img/v4.3/ave-age-per-owner-ex.png)

## Downloading Charts

HarperDB Studio charts can be downloaded in SVG, PNG, and CSV format. Instructions as follows (starting on the charts page described above):

1. Identify the chart you would like to export.
1. Click the three bars icon.
1. Select the appropriate download option.
1. The Studio will generate the export and begin downloading immediately.

## Delete a Chart

Delete a chart as follows (starting on the charts page described above):

1. Identify the chart you would like to delete.
1. Click the X icon.
1. Click the **confirm delete chart** button.
1. The chart will be deleted.

Deleting a chart that is visible to all Organization users will delete it for all users.

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-databases-browse-data.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-databases-browse-data.md new file mode 100644 index 00000000..da302f70 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-databases-browse-data.md @@ -0,0 +1,132 @@

---
title: Manage Databases / Browse Data
---

# Manage Databases / Browse Data

Manage instance databases/tables and browse data in tabular format with the following instructions:

1) Navigate to the HarperDB Studio Organizations page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **browse** in the instance control bar.

Once on the instance browse page you can view data, manage databases and tables, add new data, and more.

## Manage Databases and Tables

#### Create a Database

1) Click the plus icon at the top right of the databases section.
2) Enter the database name.
3) Click the green check mark.

#### Delete a Database

Deleting a database is permanent and irreversible. Deleting a database removes all tables and data within it.

1) Click the minus icon at the top right of the databases section.
2) Identify the appropriate database to delete and click the red minus sign in the same row.
3) Click the red check mark to confirm deletion.

#### Create a Table

1) Select the desired database from the databases section.
2) Click the plus icon at the top right of the tables section.
3) Enter the table name.
4) Enter the primary key.

   *The primary key is also often referred to as the hash attribute in the Studio, and it defines the unique identifier for each row in your table.*
5) Click the green check mark.

#### Delete a Table

Deleting a table is permanent and irreversible. Deleting a table removes all data within it.

1) Select the desired database from the databases section.
2) Click the minus icon at the top right of the tables section.
3) Identify the appropriate table to delete and click the red minus sign in the same row.
4) Click the red check mark to confirm deletion.

## Manage Table Data

The following section assumes you have selected the appropriate table from the database/table browser.

#### Filter Table Data

1) Click the magnifying glass icon at the top right of the table browser.
2) This expands the search filters. Enter your desired filter criteria.
3) The results will be filtered appropriately.

#### Load CSV Data

1) Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file.
2) To import a CSV by URL:
   1) Enter the URL in the **CSV file URL** textbox.
   2) Click **Import From URL**.
   3) The CSV will load, and you will be redirected back to browse table data.
3) To upload a CSV file:
   1) Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser).
   2) Navigate to your desired CSV file and select it.
   3) Click **Insert X Records**, where X is the number of records in your CSV.
   4) The CSV will load, and you will be redirected back to browse table data.

#### Add a Record

1) Click the plus icon at the top right of the table browser.
2) The Studio will pre-populate existing table attributes in JSON format.

   *The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction.*
3) Enter values to be added to the record.

   *You may add new attributes to the JSON; they will be reflexively added to the table.*
4) Click the **Add New** button.

#### Edit a Record

1) Click the record/row you would like to edit.
2) Modify the desired values.

   *You may add new attributes to the JSON; they will be reflexively added to the table.*
3) Click the **save icon**.

#### Delete a Record

Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted.

1) Click the record/row you would like to delete.
2) Click the **delete icon**.
3) Confirm deletion by clicking the **check icon**.

## Browse Table Data

The following section assumes you have selected the appropriate table from the database/table browser.

#### Browse Table Data

The first page of table data is automatically loaded on table selection. Paging controls are at the bottom of the table. Here you can:

* Page left and right using the arrows.
* Type in the desired page.
* Change the page size (the number of records displayed in the table).

#### Refresh Table Data

Click the refresh icon at the top right of the table browser.

#### Automatically Refresh Table Data

Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data.
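For reference, the Studio's add-record flow corresponds to the Operations API `insert` operation, and supplying a JSON array inserts multiple records in one transaction. A sketch using the demo `dev.dog` table (attribute names illustrative):

```json
{
    "operation": "insert",
    "schema": "dev",
    "table": "dog",
    "records": [
        { "id": 1, "name": "Harper", "breed": "Mutt", "age": 5 },
        { "id": 2, "name": "Penny", "breed": "Mutt", "age": 5 }
    ]
}
```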
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-instance-roles.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-instance-roles.md new file mode 100644 index 00000000..f0aa72bb --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-instance-roles.md @@ -0,0 +1,76 @@

---
title: Manage Instance Roles
---

# Manage Instance Roles

HarperDB users and roles can be managed directly through the HarperDB Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate.

Instance role configuration is handled through the **roles** page of the HarperDB Studio, accessed with the following instructions:

1) Navigate to the HarperDB Studio Organizations page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **roles** in the instance control bar.

*Note, the **roles** page will only be available to super users.*

The *roles management* screen consists of the following panels:

* **super users**

  Displays all super user roles for this instance.
* **cluster users**

  Displays all cluster user roles for this instance.
* **standard roles**

  Displays all standard roles for this instance.
* **role permission editing**

  Once a role is selected for editing, permissions will be displayed here in JSON format.

*Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`.*

## Role Management

#### Create a Role

1) Click the plus icon at the top right of the appropriate role section.
2) Enter the role name.
3) Click the green check mark.
4) Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config.
5) Configure the role permissions in the role permission editing panel.

   *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.*
6) Click **Update Role Permissions**.

#### Modify a Role

1) Click the appropriate role from the appropriate role section.
2) Modify the role permissions in the role permission editing panel.

   *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.*
3) Click **Update Role Permissions**.

#### Delete a Role

Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it.

1) Click the minus icon at the top right of the roles section.
2) Identify the appropriate role to delete and click the red minus sign in the same row.
3) Click the red check mark to confirm deletion.
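For orientation, the JSON shown in the role permission editing panel follows the permission shape described in the users & roles documentation. A minimal sketch of a standard role limited to read-only access on a single table (database, table, and flags are illustrative):

```json
{
    "super_user": false,
    "dev": {
        "tables": {
            "dog": {
                "read": true,
                "insert": false,
                "update": false,
                "delete": false,
                "attribute_permissions": []
            }
        }
    }
}
```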
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-instance-users.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-instance-users.md new file mode 100644 index 00000000..02a0a32d --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-instance-users.md @@ -0,0 +1,61 @@

---
title: Manage Instance Users
---

# Manage Instance Users

HarperDB users and roles can be managed directly through the HarperDB Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate.

Instance user configuration is handled through the **users** page of the HarperDB Studio, accessed with the following instructions:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **users** in the instance control bar.

*Note, the **users** page will only be available to super users.*

## Add a User

HarperDB instance users can be added with the following instructions.

1) In the **add user** panel on the left, enter:
   * New user username.
   * New user password.
   * Select a role.

     *Learn more about role management here: [Manage Instance Roles](./manage-instance-roles).*
2) Click **Add User**.

## Edit a User

HarperDB instance users can be modified with the following instructions.

1) In the **existing users** panel, click the row of the user you would like to edit.
2) To change a user’s password:
   1) In the **Change user password** section, enter the new password.
   2) Click **Update Password**.
3) To change a user’s role:
   1) In the **Change user role** section, select the new role.
   2) Click **Update Role**.
4) To delete a user:
   1) In the **Delete User** section, type the username into the textbox.

      *This is done for confirmation purposes.*
   2) Click **Delete User**.

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-replication.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-replication.md new file mode 100644 index 00000000..3ee158bd --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/manage-replication.md @@ -0,0 +1,89 @@

---
title: Manage Replication
---

# Manage Replication

HarperDB instance clustering and replication can be configured directly through the HarperDB Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/) first to gain a strong understanding of HarperDB clustering behavior.

All clustering configuration is handled through the **replication** page of the HarperDB Studio, accessed with the following instructions:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **replication** in the instance control bar.

Note, the **replication** page will only be available to super users.

---

## Initial Configuration

HarperDB instances do not have clustering configured by default. The HarperDB Studio will walk you through the initial configuration. Upon entering the **replication** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left, while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user) document before proceeding.

1) Enter Cluster User username. (Defaults to `cluster_user`.)
2) Enter Cluster Password.
3) Review and/or Set Cluster Node Name.
4) Click **Enable Clustering**.
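Behind the scenes, these values map onto the clustering section of the instance's `harperdb-config.yaml`; a rough sketch with illustrative values:

```yaml
clustering:
  enabled: true
  nodeName: my_node    # must be unique per instance
  user: cluster_user   # must match the cluster user on every connected instance
```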
At this point the Studio will restart your HarperDB instance, which is required for the configuration changes to take effect.

---

## Manage Clustering

Once initial clustering configuration is completed, you are presented with a clustering management screen with the following properties:

* **connected instances**

  Displays all instances within the Studio Organization that this instance manages a connection with.
* **unconnected instances**

  Displays all instances within the Studio Organization that this instance does not manage a connection with.
* **unregistered instances**

  Displays all instances outside the Studio Organization that this instance manages a connection with.
* **manage clustering**

  Once instances are connected, this will display clustering management options for all connected instances and all databases and tables.

---

## Connect an Instance

HarperDB instances can be clustered together with the following instructions.

1) Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both.
2) Identify the instance you would like to connect from the **unconnected instances** panel.
3) Click the plus icon next to the appropriate instance.
4) If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue.

---

## Disconnect an Instance

HarperDB instances can be disconnected with the following instructions.

1) Identify the instance you would like to disconnect from the **connected instances** panel.
2) Click the minus icon next to the appropriate instance.

---

## Manage Replication

Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions:

1) Identify the instance, database, and table for replication to be configured.
2) For publish, click the toggle switch in the **publish** column.
3) For subscribe, click the toggle switch in the **subscribe** column.
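Each publish/subscribe toggle pair corresponds to a per-table subscription like those described in the clustering documentation; a sketch of a single table's subscription (values illustrative):

```json
{
    "schema": "dev",
    "table": "dog",
    "publish": true,
    "subscribe": true
}
```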
diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/organizations.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/organizations.md new file mode 100644 index 00000000..888469d7 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/organizations.md @@ -0,0 +1,105 @@

---
title: Organizations
---

# Organizations

HarperDB Studio organizations provide the ability to group HarperDB Cloud Instances. Organization behavior is as follows:

* Billing occurs at the organization level to a single credit card.
* Organizations retain their own unique HarperDB Cloud subdomain.
* Cloud instances reside within an organization.
* Studio users can be invited to organizations to share instances.

An organization is automatically created for you when you sign up for HarperDB Studio. If you only have one organization, the Studio will automatically bring you to your organization’s page.

---

## List Organizations

A summary view of all organizations your user belongs to can be viewed on the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the HarperDB Studio.

## Create a New Organization

A new organization can be created as follows:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
2) Click the **Create a New Organization** card.
3) Fill out new organization details:
   * Enter Organization Name

     *This is used for descriptive purposes only.*
   * Enter Organization Subdomain

     *Part of the URL that will be used to identify your HarperDB Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.*
4) Click **Create Organization**.

## Delete an Organization

An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows:

1) Navigate to the HarperDB Studio Organizations page.
2) Identify the proper organization card and click the trash can icon.
3) Enter the organization name into the text box.

   *This is done for confirmation purposes to ensure you do not accidentally delete an organization.*
4) Click the **Do It** button.

## Manage Users

HarperDB Studio organization owners can manage users, including inviting new users, removing users, and toggling ownership.

#### Inviting a User

A new user can be invited to an organization as follows:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
2) Click the appropriate organization card.
3) Click **users** at the top of the screen.
4) In the **add user** box, enter the new user’s email address.
5) Click **Add User**.

Users may or may not already be HarperDB Studio users when adding them to an organization. If the HarperDB Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a HarperDB Studio account, they will receive an email welcoming them to HarperDB Studio.

---

#### Toggle a User’s Organization Owner Status

Organization owners have full access to the organization, including the ability to manage organization users; create, modify, and delete instances; and delete the organization. Users must have accepted their invitation prior to being promoted to an owner. A user’s organization owner status can be toggled as follows:

1) Navigate to the HarperDB Studio Organizations page.
2) Click the appropriate organization card.
3) Click **users** at the top of the screen.
4) Click the appropriate user from the **existing users** section.
5) Toggle the **Is Owner** switch to the desired status.

---

#### Remove a User from an Organization

Users may be removed from an organization at any time. Removing a user from an organization will not delete their HarperDB Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
2) Click the appropriate organization card.
3) Click **users** at the top of the screen.
4) Click the appropriate user from the **existing users** section.
5) Type **DELETE** in the text box in the **Delete User** row.

   *This is done for confirmation purposes to ensure you do not accidentally delete a user.*
6) Click **Delete User**.

## Manage Billing

Billing is configured per organization and will be billed to the stored credit card at appropriate intervals (monthly or annually, depending on the registered instance). Billing settings can be configured as follows:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
2) Click the appropriate organization card.
3) Click **billing** at the top of the screen.

Here organization owners can view invoices, manage coupons, and manage the associated credit card.

*HarperDB billing and payments are managed via Stripe.*

### Add a Coupon

Coupons are applicable towards any paid tier or enterprise instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows:

1) In the coupons panel of the **billing** page, enter your coupon code.
2) Click **Add Coupon**.
3) The coupon will then be available and displayed in the coupons panel.

diff --git a/site/versioned_docs/version-4.3/administration/harperdb-studio/query-instance-data.md b/site/versioned_docs/version-4.3/administration/harperdb-studio/query-instance-data.md new file mode 100644 index 00000000..5c3ae28f --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/harperdb-studio/query-instance-data.md @@ -0,0 +1,53 @@

---
title: Query Instance Data
---

# Query Instance Data

SQL queries can be executed directly through the HarperDB Studio with the following instructions:

1) Navigate to the [HarperDB Studio Organizations](https://studio.harperdb.io/organizations) page.
2) Click the appropriate organization that the instance belongs to.
3) Select your desired instance.
4) Click **query** in the instance control bar.
5) Enter your SQL query in the SQL query window.
6) Click **Execute**.

*Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser.*

## Browse Query Results Set

#### Browse Results Set Data

The first page of results set data is automatically loaded on query execution. Paging controls are at the bottom of the table. Here you can:

* Page left and right using the arrows.
* Type in the desired page.
* Change the page size (the number of records displayed in the table).

#### Refresh Results Set

Click the refresh icon at the top right of the results set table.

#### Automatically Refresh Results Set

Toggle the auto switch at the top right of the results set table. The results set will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data.

## Query History

Query history is stored in your local browser cache. Executed queries are listed with the most recent at the top in the **query history** section.

#### Rerun Previous Query

* Identify the query from the **query history** list.
* Click the appropriate query. It will be loaded into the **sql query** input box.
* Click **Execute**.

#### Clear Query History

Click the trash can icon at the top right of the **query history** section.

## Create Charts

The HarperDB Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information.
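In the spirit of the note above about queries running exactly as entered, it is usually better to constrain exploratory queries than to scan a whole table. An illustrative example against the demo `dev.dog` table (table and column names are assumptions borrowed from the chart example elsewhere in these docs):

```sql
SELECT owner_name, age
FROM dev.dog
WHERE age > 5
```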
diff --git a/site/versioned_docs/version-4.3/administration/jobs.md b/site/versioned_docs/version-4.3/administration/jobs.md new file mode 100644 index 00000000..44b755fe --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/jobs.md @@ -0,0 +1,112 @@

---
title: Jobs
---

# Jobs

HarperDB Jobs are asynchronous tasks performed by the Operations API.

## Job Summary

Jobs use an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so that job is started and an id is provided to check on its status.

The job status can be **COMPLETE** or **IN\_PROGRESS**.

## Example Job Operations

Example job operations include:

[csv data load](../developers/operations-api/bulk-operations#csv-data-load)

[csv file load](../developers/operations-api/bulk-operations#csv-file-load)

[csv url load](../developers/operations-api/bulk-operations#csv-url-load)

[import from s3](../developers/operations-api/bulk-operations#import-from-s3)

[delete_records_before](../developers/operations-api/utilities#delete-records-before)

[export_local](../developers/operations-api/utilities#export-local)

[export_to_s3](../developers/operations-api/utilities#export-to-s3)

Example Response from a Job Operation

```
{
    "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16"
}
```

Whenever one of these operations is initiated, an asynchronous job is created, and the response contains the ID of that job, which can be used to check on its status.

## Managing Jobs

To check on a job's status, use the [get_job](../developers/operations-api/jobs#get-job) operation.

Get Job Request

```
{
    "operation": "get_job",
    "id": "4a982782-929a-4507-8794-26dae1132def"
}
```

Get Job Response

```
[
    {
        "__createdtime__": 1611615798782,
        "__updatedtime__": 1611615801207,
        "created_datetime": 1611615798774,
        "end_datetime": 1611615801206,
        "id": "4a982782-929a-4507-8794-26dae1132def",
        "job_body": null,
        "message": "successfully loaded 350 of 350 records",
        "start_datetime": 1611615798805,
        "status": "COMPLETE",
        "type": "csv_url_load",
        "user": "HDB_ADMIN",
        "start_datetime_converted": "2021-01-25T23:03:18.805Z",
        "end_datetime_converted": "2021-01-25T23:03:21.206Z"
    }
]
```

## Finding Jobs

To find jobs (if the ID is not known) use the [search_jobs_by_start_date](../developers/operations-api/jobs#search-jobs-by-start-date) operation.
Search Jobs Request

```
{
    "operation": "search_jobs_by_start_date",
    "from_date": "2021-01-25T22:05:27.464+0000",
    "to_date": "2021-01-25T23:05:27.464+0000"
}
```

Search Jobs Response

```
[
    {
        "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1",
        "user": "HDB_ADMIN",
        "type": "csv_url_load",
        "status": "COMPLETE",
        "start_datetime": 1611613284781,
        "end_datetime": 1611613287204,
        "job_body": null,
        "message": "successfully loaded 350 of 350 records",
        "created_datetime": 1611613284764,
        "__createdtime__": 1611613284767,
        "__updatedtime__": 1611613287207,
        "start_datetime_converted": "2021-01-25T22:21:24.781Z",
        "end_datetime_converted": "2021-01-25T22:21:27.204Z"
    }
]
```

diff --git a/site/versioned_docs/version-4.3/administration/logging/audit-logging.md b/site/versioned_docs/version-4.3/administration/logging/audit-logging.md new file mode 100644 index 00000000..11d552ec --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/logging/audit-logging.md @@ -0,0 +1,135 @@

---
title: Audit Logging
---

# Audit Logging

### Audit log

The audit log uses a standard HarperDB table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table.

The audit log is enabled by default. To disable it, set `logging.auditLog` to false in the config file, `harperdb-config.yaml`, then restart HarperDB for the change to take effect. Note, the audit log must be enabled for real-time messaging.

### Audit Log Operations

#### read\_audit\_log

The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [HarperDB API documentation](../../developers/operations-api/logs).

**Search by Timestamp**

```json
{
    "operation": "read_audit_log",
    "schema": "dev",
    "table": "dog",
    "search_type": "timestamp",
    "search_values": [
        1660585740558
    ]
}
```

There are three outcomes when searching by timestamp:

* `"search_values": []` - All records returned for the specified table
* `"search_values": [1660585740558]` - All records after the provided timestamp
* `"search_values": [1660585740558, 1760585759710]` - Records "from" and "to" the provided timestamps

***

**Search by Username**

```json
{
    "operation": "read_audit_log",
    "schema": "dev",
    "table": "dog",
    "search_type": "username",
    "search_values": [
        "admin"
    ]
}
```

The above example will return all records whose `username` is "admin."

***

**Search by Primary Key**

```json
{
    "operation": "read_audit_log",
    "schema": "dev",
    "table": "dog",
    "search_type": "hash_value",
    "search_values": [
        318
    ]
}
```

The above example will return all records whose primary key (`hash_value`) is 318.

***

#### read\_audit\_log Response

The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation also gives you the `original_records`.
+ +```json +{ + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [ + 1, + 2 + ], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] +} +``` + +#### delete\_audit\_logs\_before + +Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 +} +``` diff --git a/site/versioned_docs/version-4.3/administration/logging/index.md b/site/versioned_docs/version-4.3/administration/logging/index.md new file mode 100644 index 00000000..2ed92774 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/logging/index.md @@ -0,0 +1,11 @@ +--- +title: Logging +--- + +# Logging + +HarperDB provides many different logging options for various features and functionality. + +* [Standard Logging](./standard-logging): HarperDB maintains a log of events that take place throughout operation. +* [Audit Logging](./audit-logging): HarperDB uses a standard HarperDB table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +* [Transaction Logging](./transaction-logging): HarperDB stores a verbose history of all transactions logged for specified database tables, including original data records. diff --git a/site/versioned_docs/version-4.3/administration/logging/standard-logging.md b/site/versioned_docs/version-4.3/administration/logging/standard-logging.md new file mode 100644 index 00000000..d586da1c --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/logging/standard-logging.md @@ -0,0 +1,65 @@ +--- +title: Standard Logging +--- + +# Standard Logging + +HarperDB maintains a log of events that take place throughout operation. Log messages can be used for diagnostics purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory `/log/hdb.log`. The install log is located in the HarperDB application directory most likely located in your npm directory `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: + +``` + [] [] ...[]: +``` + +For example, a typical log entry looks like: + +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started. +``` + +The components of a log entry are: + +* timestamp - This is the date/time stamp when the event occurred +* level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels in order of least urgent (and more verbose) are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +* thread/ID - This reports the name of the thread and the thread ID that the event was reported on. 
Note that NATS logs are recorded by their process name, and there is no thread ID for them since they run as a separate process. Key threads are: + * main - This is the thread that is responsible for managing all other threads and routing incoming requests to the other threads. + * http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + * Clustering\* - These are threads and processes that handle replication. + * job - These are job threads that have been started to handle operations that are executed in a separate job thread. +* tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +* message - This is the main message that was reported. + +We try to keep logging to a minimum by default, so the default log level is `error`. If you require more information from the logs, setting a more verbose log level (such as `info` or `debug`) will provide it. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +HarperDB clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of HarperDB, and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers, there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels, from least to most verbose, are `error`, `warn`, `info`, `debug` and `trace`. + +## Log File vs Standard Streams + +HarperDB logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. To log to standard streams effectively, run `harperdb` directly rather than starting it as a separate process (that is, don't use `harperdb start`), and set `logging.stdStreams` to true. Note that logging only to standard streams will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, and determining when to rotate. This allows for organized storage and efficient use of disk space. For more information see “logging” in our [config docs](../../deployments/configuration). + +## Read Logs via the API + +To access specific logs you may query the HarperDB API. Logs can be queried using the `read_log` operation. `read_log` returns output from the log based on the provided search criteria.
+ +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` diff --git a/site/versioned_docs/version-4.3/administration/logging/transaction-logging.md b/site/versioned_docs/version-4.3/administration/logging/transaction-logging.md new file mode 100644 index 00000000..a65c4714 --- /dev/null +++ b/site/versioned_docs/version-4.3/administration/logging/transaction-logging.md @@ -0,0 +1,87 @@ +--- +title: Transaction Logging +--- + +# Transaction Logging + +HarperDB offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. + +## Transaction log + +The first option is `read_transaction_log`. The transaction log is built upon clustering streams. Clustering streams are per-table message stores that enable data to be propagated across a cluster. HarperDB leverages streams for use with the transaction log. When clustering is enabled all transactions that occur against a table are pushed to its stream, and thus make up the transaction log. + +If you would like to use the transaction log, but have not set up clustering yet, please see ["How to Cluster"](../../developers/clustering/). + +## Transaction Log Operations + +### read\_transaction\_log + +The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided. + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 +} +``` + +_See example response below._ + +### read\_transaction\_log Response + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +_See example request above._ + +### delete\_transaction\_logs\_before + +The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +_Note: Streams are used for catchup if a node goes down. If you delete messages from a stream there is a chance catchup won't work._ + +Read on for `read_audit_log`, the second option, for logging transactions executed against a table. 
diff --git a/site/versioned_docs/version-4.3/deployments/_category_.json b/site/versioned_docs/version-4.3/deployments/_category_.json new file mode 100644 index 00000000..8fdd6e17 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Deployments", + "position": 3, + "link": { + "type": "generated-index", + "title": "Deployments Documentation", + "description": "Installation and deployment guides for HarperDB", + "keywords": [ + "deployments" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/deployments/configuration.md b/site/versioned_docs/version-4.3/deployments/configuration.md new file mode 100644 index 00000000..23c92368 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/configuration.md @@ -0,0 +1,970 @@ +--- +title: Configuration File +--- + +# Configuration File + +HarperDB is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the HarperDB root directory (by default this is a directory named `hdb` located in the home directory of the current user). + +Some configuration will be populated by default in the config file on install, regardless of whether it is used. + +*** + +## Using the Configuration File and Naming Conventions + +The configuration elements in `harperdb-config.yaml` use camelCase: `operationsApi`. + +To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. HarperDB must be restarted for changes to take effect. + +Alternatively, configuration can be changed via environment and/or command line variables or via the API. To access lower-level elements, use underscores to join parent/child elements (when used this way elements are case insensitive): + +``` +- Environment variables: `OPERATIONSAPI_NETWORK_PORT=9925` +- Command line variables: `--OPERATIONSAPI_NETWORK_PORT 9925` +- Calling `set_configuration` through the API: `operationsApi_network_port: 9925` +``` + +_Note: Component configuration cannot be added or updated via CLI or ENV variables._ + +## Importing installation configuration + +To use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your custom configuration file. + +To install HarperDB on top of an existing configuration file, set `HDB_CONFIG` to the configuration file in the root path of your install, `<ROOTPATH>/harperdb-config.yaml`. + +*** + +## Configuration Options + +### `http` + +`sessionAffinity` - _Type_: string; _Default_: null + +HarperDB is a multi-threaded server designed to scale to utilize many CPU cores with high concurrency. Session affinity can help improve the efficiency and fairness of thread utilization by routing multiple requests from the same client to the same thread. This provides a fairer method of request handling by keeping a single user contained to a single thread, can improve caching locality (multiple requests from a single user are more likely to access the same data), and can provide the ability to share information in-memory in user sessions. Enabling session affinity will cause subsequent requests from the same client to be routed to the same thread. + +To enable `sessionAffinity`, you need to specify how clients will be identified from the incoming requests. If you are using HarperDB to directly serve HTTP requests from users from different remote addresses, you can use a setting of `ip`.
However, if you are using HarperDB behind a proxy server or application server, all the remote IP addresses will be the same and HarperDB will effectively only run on a single thread. Alternatively, you can specify a header to use for identification. If you are using basic authentication, you could use the "Authorization" header to route requests to threads by the user's credentials. If you have another header that uniquely identifies users/clients, you can use that as the value of sessionAffinity. But be careful to ensure that the value does provide sufficient uniqueness, so that requests are effectively distributed across all the threads and fully utilize all your CPU cores. + +```yaml +http: + sessionAffinity: ip +``` + +`compressionThreshold` - _Type_: number; _Default_: 1200 (bytes) + +For HTTP clients that support (Brotli) compression encoding, responses that are larger than this threshold will be compressed (also note that for clients that accept compression, any streaming responses from queries are compressed as well, since the size is not known beforehand). + +```yaml +http: + compressionThreshold: 1200 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of allowable domains for CORS. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limits the amount of time the parser will wait to receive the complete HTTP headers. + +`maxHeaderSize` - _Type_: integer; _Default_: 16394 + +The maximum allowed size of HTTP headers in bytes. + +`keepAliveTimeout` - _Type_: integer; _Default_: 30,000 milliseconds (30 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9926 + +The port used to access the component server. + +`securePort` - _Type_: integer; _Default_: null + +The port the HarperDB component server uses for HTTPS connections. This requires a valid certificate and key. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout. + +```yaml +http: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + maxHeaderSize: 8192 + https: false + keepAliveTimeout: 30000 + port: 9926 + securePort: null + timeout: 120000 +``` + +`mtls` - _Type_: boolean | object; _Default_: false + +This can be configured to enable mTLS-based authentication for incoming connections. If enabled with default options (by setting it to `true`), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's ID/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. + +You can also define specific mTLS options by specifying an object for mtls with the following (optional) properties: + +`user` - _Type_: string; _Default_: Common Name + +This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user.
+This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication. + +`required` - _Type_: boolean; _Default_: false + +This can be enabled to require client certificates (mTLS) for all incoming MQTT connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication. + +```yaml +http: + mtls: true +``` +or +```yaml +http: + mtls: + required: true + user: user-name +``` + + +*** + +### `threads` + +The `threads` provides control over how many threads, how much heap memory they may use, and debugging of the threads: + +`count` - _Type_: number; _Default_: One less than the number of logical cores/processors + +The `threads.count` option specifies the number of threads that will be used to service the HTTP requests for the operations API and custom functions. Generally, this should be close to the number of CPU logical cores/processors to ensure the CPU is fully utilized (a little less because HarperDB does have other threads at work), assuming HarperDB is the main service on a server. + +```yaml +threads: + count: 11 +``` + +`debug` - _Type_: boolean | object; _Default_: false + +This enables debugging. If simply set to true, this will enable debugging on the main thread on port 9229 with the 127.0.0.1 host interface. This can also be an object for more debugging control. + +`debug.port` - The port to use for debugging the main thread +`debug.startingPort` - This will set up a separate port for debugging each thread. This is necessary for debugging individual threads with devtools. +`debug.host` - Specify the host interface to listen on +`debug.waitForDebugger` - Wait for debugger before starting + +```yaml +threads: + debug: + port: 9249 +``` + +`maxHeapMemory` - _Type_: number; + +```yaml +threads: + maxHeapMemory: 300 +``` + +This specifies the heap memory limit for each thread, in megabytes. The default heap limit is a heuristic based on available memory and thread count. + + +*** + +### `clustering` + +The `clustering` section configures the clustering engine, this is used to replicate data between instances of HarperDB. + +Clustering offers a lot of different configurations, however in a majority of cases the only options you will need to pay attention to are: + +* `clustering.enabled` Enable the clustering processes. +* `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes. +* `clustering.hubServer.cluster.network.routes`The connections to other instances. +* `clustering.nodeName` The name of your node, must be unique within the cluster. +* `clustering.user` The name of the user credentials used for Inter-node authentication. + +`enabled` - _Type_: boolean; _Default_: false + +Enable clustering. + +_Note: If you enabled clustering but do not create and add a cluster user you will get a validation error. See `user` description below on how to add a cluster user._ + +```yaml +clustering: + enabled: true +``` + +`clustering.hubServer.cluster` + +Clustering’s `hubServer` facilitates the HarperDB mesh network and discovery service. 
+ +```yaml +clustering: + hubServer: + cluster: + name: harperdb + network: + port: 9932 + routes: + - host: 3.62.184.22 + port: 9932 + - host: 3.735.184.8 + port: 9932 +``` + +`name` - _Type_: string, _Default_: harperdb + +The name of your cluster. This name needs to be consistent for all other nodes intended to be meshed in the same network. + +`port` - _Type_: integer, _Default_: 9932 + +The port the hub server uses to accept cluster connections + +`routes` - _Type_: array, _Default_: null + +An object array that represent the host and port this server will cluster to. Each object must have two properties `port` and `host`. Multiple entries can be added to create network resiliency in the event one server is unavailable. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance. + +`clustering.hubServer.leafNodes` + +```yaml +clustering: + hubServer: + leafNodes: + network: + port: 9931 +``` + +`port` - _Type_: integer; _Default_: 9931 + +The port the hub server uses to accept leaf server connections. + +`clustering.hubServer.network` + +```yaml +clustering: + hubServer: + network: + port: 9930 +``` + +`port` - _Type_: integer; _Default_: 9930 + +Use this port to connect a client to the hub server, for example using the NATs SDK to interact with the server. + +`clustering.leafServer` + +Manages streams, streams are ‘message stores’ that store table transactions. + +```yaml +clustering: + leafServer: + network: + port: 9940 + routes: + - host: 3.62.184.22 + port: 9931 + - host: node3.example.com + port: 9931 + streams: + maxAge: 3600 + maxBytes: 10000000 + maxMsgs: 500 + path: /user/hdb/clustering/leaf +``` + +`port` - _Type_: integer; _Default_: 9940 + +Use this port to connect a client to the leaf server, for example using the NATs SDK to interact with the server. + +`routes` - _Type_: array; _Default_: null + +An object array that represent the host and port the leaf node will directly connect with. Each object must have two properties `port` and `host`. Unlike the hub server, the leaf server will establish connections to all listed hosts. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance. + +`clustering.leafServer.streams` + +`maxAge` - _Type_: integer; _Default_: null + +The maximum age of any messages in the stream, expressed in seconds. + +`maxBytes` - _Type_: integer; _Default_: null + +The maximum size of the stream in bytes. Oldest messages are removed if the stream exceeds this size. + +`maxMsgs` - _Type_: integer; _Default_: null + +How many messages may be in a stream. Oldest messages are removed if the stream exceeds this number. + +`path` - _Type_: string; _Default_: \/clustering/leaf + +The directory where all the streams are kept. 
+ +```yaml +clustering: + leafServer: + streams: + maxConsumeMsgs: 100 + maxIngestThreads: 2 +``` +`maxConsumeMsgs` - _Type_: integer; _Default_: 100 + +The maximum number of messages a consumer can process in one go. + +`maxIngestThreads` - _Type_: integer; _Default_: 2 + +The number of HarperDB threads that are delegated to ingesting messages. + +*** + +`logLevel` - _Type_: string; _Default_: error + +Control the verbosity of clustering logs. + +```yaml +clustering: + logLevel: error +``` + +There exists a log level hierarchy in order as `trace`, `debug`, `info`, `warn`, and `error`. When the level is set to `trace` logs will be created for all possible levels. Whereas if the level is set to `warn`, the only entries logged will be `warn` and `error`. The default value is `error`. + +`nodeName` - _Type_: string; _Default_: null + +The name of this node in your HarperDB cluster topology. This must be a value unique from the rest of the cluster node names. + +_Note: If you want to change the node name make sure there are no subscriptions in place before doing so. After the name has been changed a full restart is required._ + +```yaml +clustering: + nodeName: great_node +``` + +`tls` + +Transport Layer Security default values are automatically generated on install. + +```yaml +clustering: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem + insecure: true + verify: true +``` + +`certificate` - _Type_: string; _Default_: \/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: \/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: \/keys/privateKey.pem + +Path to the private key file. + +`insecure` - _Type_: boolean; _Default_: true + +When true, will skip certificate verification. For use only with self-signed certs. + +`republishMessages` - _Type_: boolean; _Default_: false + +When true, all transactions that are received from other nodes are republished to this node's stream. When subscriptions are not fully connected between all nodes, this ensures that messages are routed to all nodes through intermediate nodes. This also ensures that all writes, whether local or remote, are written to the NATS transaction log. However, there is additional overhead with republishing, and setting this is to false can provide better data replication performance. When false, you need to ensure all subscriptions are fully connected between every node to every other node, and be aware that the NATS transaction log will only consist of local writes. + +`verify` - _Type_: boolean; _Default_: true + +When true, hub server will verify client certificate using the CA certificate. + +*** + +`user` - _Type_: string; _Default_: null + +The username given to the `cluster_user`. All instances in a cluster must use the same clustering user credentials (matching username and password). + +Inter-node authentication takes place via a special HarperDB user role type called `cluster_user`. 
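+ +For example, a cluster user could be created with the `add_user` operation (a sketch; the `cluster_person`/`pass123!` credentials are placeholders matching the configuration example below): + +```json +{ + "operation": "add_user", + "role": "cluster_user", + "username": "cluster_person", + "password": "pass123!", + "active": true +} +```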
+ +As shown above, the user can be created through the API using an `add_user` request with the role set to `cluster_user`; alternatively, on install, use the environment variables `CLUSTERING_USER=cluster_person` and `CLUSTERING_PASSWORD=pass123!`, or the CLI variables `harperdb --CLUSTERING_USER cluster_person --CLUSTERING_PASSWORD pass123!` + +```yaml +clustering: + user: cluster_person +``` + +*** + +### `localStudio` + +The `localStudio` section configures the local HarperDB Studio, a GUI for HarperDB hosted on the server. A hosted version of the HarperDB Studio with licensing and provisioning options is available at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or HarperDB Studio is made directly from your browser to the instance. + +`enabled` - _Type_: boolean; _Default_: false + +Enables or disables the local studio. + +```yaml +localStudio: + enabled: false +``` + +*** + +### `logging` + +The `logging` section configures HarperDB logging across all HarperDB functionality. This includes standard text logging of application and database events as well as structured data logs of record changes. Application/database events are logged in text format to the `~/hdb/log/hdb.log` file (or the location specified by `logging.root`). + +In addition, structured logging of data changes is also available: + +`auditLog` - _Type_: boolean; _Default_: false + +Enables table transaction logging. + +```yaml +logging: + auditLog: false +``` + +To access the audit logs, use the API operation `read_audit_log`. It will provide a history of the data, including original records and changes made, in a specified table. + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +`file` - _Type_: boolean; _Default_: true + +Defines whether to log to a file. + +```yaml +logging: + file: true +``` + +`auditRetention` - _Type_: string|number; _Default_: 3d + +This specifies how long audit logs should be retained. + +`level` - _Type_: string; _Default_: error + +Control the verbosity of text event logs. + +```yaml +logging: + level: error +``` + +There exists a log level hierarchy, in order: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. When the level is set to `trace`, logs will be created for all possible levels. Whereas if the level is set to `fatal`, the only entries logged will be `fatal` and `notify`. The default value is `error`. + +`root` - _Type_: string; _Default_: `<ROOTPATH>/log` + +The path where the log files will be written. + +```yaml +logging: + root: ~/hdb/log +``` + +`rotation` + +Rotation provides the ability for a user to systematically rotate and archive the `hdb.log` file. To enable it, `interval` and/or `maxSize` must be set. + +_**Note:**_ `interval` and `maxSize` are approximates only. It is possible that the log file will exceed these values slightly before it is rotated. + +```yaml +logging: + rotation: + enabled: true + compress: false + interval: 1D + maxSize: 100K + path: /user/hdb/log +``` + +`enabled` - _Type_: boolean; _Default_: false + +Enables log rotation. + +`compress` - _Type_: boolean; _Default_: false + +Enables compression via gzip when logs are rotated. + +`interval` - _Type_: string; _Default_: null + +The time that should elapse between rotations. Acceptable units are D(ays), H(ours) or M(inutes). + +`maxSize` - _Type_: string; _Default_: null + +The maximum size the log file can reach before it is rotated. Must use units M(egabyte), G(igabyte), or K(ilobyte).
+ +`path` - _Type_: string; _Default_: `<ROOTPATH>/log` + +Where to store the rotated log file. The file naming convention is `HDB-YYYY-MM-DDT-HH-MM-SSSZ.log`. + +`stdStreams` - _Type_: boolean; _Default_: false + +Log HarperDB logs to the standard output and error streams. + +```yaml +logging: + stdStreams: false +``` + +*** + +### `authentication` + +The authentication section defines the configuration for the default authentication mechanism in HarperDB. + +```yaml +authentication: + authorizeLocal: true + cacheTTL: 30000 + enableSessions: true + operationTokenTimeout: 1d + refreshTokenTimeout: 30d +``` + +`authorizeLocal` - _Type_: boolean; _Default_: true + +This will automatically authorize any requests from the loopback IP address as the superuser. This should be disabled for any HarperDB servers that may be accessed by untrusted users from the same instance. For example, this should be disabled if you are using a local proxy, or for general server hardening. + +`cacheTTL` - _Type_: number; _Default_: 30000 + +This defines the length of time (in milliseconds) that an authentication (a particular Authorization header or token) can be cached. + +`enableSessions` - _Type_: boolean; _Default_: true + +This will enable cookie-based sessions to maintain an authenticated session. This is generally the preferred mechanism for maintaining authentication in web browsers as it allows cookies to hold an authentication token securely without giving JavaScript code access to tokens/credentials that may open up XSS vulnerabilities. + +`operationTokenTimeout` - _Type_: string; _Default_: 1d + +Defines the length of time an operation token will be valid until it expires. Example values: https://github.com/vercel/ms. + +`refreshTokenTimeout` - _Type_: string; _Default_: 30d + +Defines the length of time a refresh token will be valid until it expires. Example values: https://github.com/vercel/ms. + +### `operationsApi` + +The `operationsApi` section configures the HarperDB Operations API.\ +All the `operationsApi` configuration is optional. Any configuration that is not provided under this section will default to the `http` configuration section. + +`network` + +```yaml +operationsApi: + network: + cors: true + corsAccessList: + - null + domainSocket: /user/hdb/operations-server + headersTimeout: 60000 + keepAliveTimeout: 5000 + port: 9925 + securePort: null + timeout: 120000 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of allowable domains for CORS. + +`domainSocket` - _Type_: string; _Default_: `<user home>/hdb/operations-server` + +The path to the Unix domain socket used to provide the Operations API through the CLI. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limits the amount of time the parser will wait to receive the complete HTTP headers. + +`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9925 + +The port the HarperDB operations API interface will listen on. + +`securePort` - _Type_: integer; _Default_: null + +The port the HarperDB operations API uses for HTTPS connections. This requires a valid certificate and key.
+ +`timeout` - _Type_: integer; _Default_: Defaults to 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout. + +`tls` + +This configures the Transport Layer Security for HTTPS support. + +```yaml +operationsApi: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: \/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: \/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: \/keys/privateKey.pem + +Path to the private key file. + +*** + +### `componentsRoot` + +`componentsRoot` - _Type_: string; _Default_: \/components + +The path to the folder containing the local component files. + +```yaml +componentsRoot: ~/hdb/components +``` + +*** + +### `rootPath` + +`rootPath` - _Type_: string; _Default_: home directory of the current user + +The HarperDB database and applications/API/interface are decoupled from each other. The `rootPath` directory specifies where the HarperDB application persists data, config, logs, and Custom Functions. + +```yaml +rootPath: /Users/jonsnow/hdb +``` + +*** + +### `storage` + +`writeAsync` - _Type_: boolean; _Default_: false + +The `writeAsync` option turns off disk flushing/syncing, allowing for faster write operation throughput. However, this does not provide storage integrity guarantees, and if a server crashes, it is possible that there may be data loss requiring restore from another backup/another node. + +```yaml +storage: + writeAsync: false +``` + +`caching` - _Type_: boolean; _Default_: true + +The `caching` option enables in-memory caching of records, providing faster access to frequently accessed objects. This can incur some extra overhead for situations where reads are extremely random and don't benefit from caching. + +```yaml +storage: + caching: true +``` + +`compression` - _Type_: boolean; _Default_: true + +The `compression` option enables compression of records in the database. This can be helpful for very large records in reducing storage requirements and potentially allowing more data to be cached. This uses the very fast LZ4 compression algorithm, but this still incurs extra costs for compressing and decompressing. + +```yaml +storage: + compression: false +``` + +`compression.dictionary` _Type_: number; _Default_: null + +Path to a compression dictionary file + +`compression.threshold` _Type_: number; _Default_: Either `4036` or if `storage.pageSize` provided `storage.pageSize - 60` + +Only entries that are larger than this value (in bytes) will be compressed. + +```yaml +storage: + compression: + dictionary: /users/harperdb/dict.txt + threshold: 1000 +``` + +`compactOnStart` - _Type_: boolean; _Default_: false + +When `true` all non-system databases will be compacted when starting HarperDB, read more [here](../administration/compact). + +`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false + +Keep the backups made by compactOnStart. + +```yaml +storage: + compactOnStart: true + compactOnStartKeepBackup: false +``` + +`maxTransactionQueueTime` - _Type_: time; _Default_: 45s + +The `maxTransactionQueueTime` specifies how long the write queue can get before write requests are rejected (with a 503). 
+ +```yaml +storage: + maxTransactionQueueTime: 2m +``` + +`noReadAhead` - _Type_: boolean; _Default_: false + +The `noReadAhead` option advises the operating system to not read ahead when reading from the database. This provides better memory utilization for databases with small records (less than one page), but can degrade performance in situations where large records are used or frequent range queries are used. + +```yaml +storage: + noReadAhead: true +``` + +`prefetchWrites` - _Type_: boolean; _Default_: true + +The `prefetchWrites` option loads data prior to write transactions. This should be enabled for databases that are larger than memory (although it can be faster to disable this for smaller databases). + +```yaml +storage: + prefetchWrites: true +``` + +`path` - _Type_: string; _Default_: `/schema` + +The `path` configuration sets where all database files should reside. + +```yaml +storage: + path: /users/harperdb/storage +``` +_**Note:**_ This configuration applies to all database files, which includes system tables that are used internally by HarperDB. For this reason if you wish to use a non default `path` value you must move any existing schemas into your `path` location. Existing schemas is likely to include the system schema which can be found at `/schema/system`. + + +`pageSize` - _Type_: number; _Default_: Defaults to the default page size of the OS + +Defines the page size of the database. + +```yaml +storage: + pageSize: 4096 +``` + +*** + +### `tls` + +The section defines the certificates, keys, and settings for Transport Layer Security (TLS) for HTTPS and TLS socket support. This is used for both the HTTP and MQTT protocols. The `tls` section can be a single object with the settings below, or it can be an array of objects, where each object is a separate TLS configuration. By using an array, the TLS configuration can be used to define multiple certificates for different domains/hosts (negotiated through SNI). + +```yaml +tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: \/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: \/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: \/keys/privateKey.pem + +Path to the private key file. + +`ciphers` - _Type_: string; + +Allows specific ciphers to be set. + +If you want to define multiple certificates that are applied based on the domain/host requested via SNI, you can define an array of TLS configurations. Each configuration can have the same properties as the root TLS configuration, but can (optionally) also have an additional `host` property to specify the domain/host that the certificate should be used for: + +```yaml +tls: + - certificate: ~/hdb/keys/certificate1.pem + certificateAuthority: ~/hdb/keys/ca1.pem + privateKey: ~/hdb/keys/privateKey1.pem + host: example.com # the host is optional, and if not provided, this certificate's common name will be used as the host name. + - certificate: ~/hdb/keys/certificate2.pem + certificateAuthority: ~/hdb/keys/ca2.pem + privateKey: ~/hdb/keys/privateKey2.pem + +``` + +Note that a `tls` section can also be defined in the `operationsApi` section, which will override the root `tls` section for the operations API. + +*** + +### `mqtt` + +The MQTT protocol can be configured in this section. 
+ +```yaml +mqtt: + network: + port: 1883 + securePort: 8883 + mtls: false + webSocket: true + requireAuthentication: true +``` + +`port` - _Type_: number; _Default_: 1883 + +This is the port to use for listening for insecure MQTT connections. + +`securePort` - _Type_: number; _Default_: 8883 + +This is the port to use for listening for secure MQTT connections. This will use the `tls` configuration for certificates. + +`webSocket` - _Type_: boolean; _Default_: true + +This enables access to MQTT through WebSockets. This will handle WebSocket connections on the http port (defaults to 9926) that have specified a (sub)protocol of `mqtt`. + +`requireAuthentication` - _Type_: boolean; _Default_: true + +This indicates if authentication should be required for establishing an MQTT connection (whether through MQTT connection credentials or mTLS). Disabling this allows unauthenticated connections, which are then subject to authorization for publishing and subscribing (and by default tables/resources do not authorize such access, but that can be enabled at the resource level). + +`mtls` - _Type_: boolean | object; _Default_: false + +This can be configured to enable mTLS-based authentication for incoming connections. If enabled with default options (by setting it to `true`), the client certificate will be checked against the certificate authority specified in the `tls` section. If the certificate can be properly verified, the connection will authenticate users where the user's ID/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. + +You can also define specific mTLS options by specifying an object for mtls with the following (optional) properties: + +`user` - _Type_: string; _Default_: Common Name + +This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. +This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication. + +`required` - _Type_: boolean; _Default_: false + +This can be enabled to require client certificates (mTLS) for all incoming MQTT connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication. + +`certificateAuthority` - _Type_: string; _Default_: Path from `tls.certificateAuthority` + +This can define a specific path to use for the certificate authority. By default, certificate authorization checks against the CA specified at `tls.certificateAuthority`, but if you need a specific/distinct CA for MQTT, you can set this. + +For example, you could specify that mTLS is required and will authenticate as "user-name": +```yaml +mqtt: + network: + mtls: + user: user-name + required: true +``` + +*** + +### `databases` + +The `databases` section is an optional configuration that can be used to define where database files should reside, down to the table level. +This configuration should be set before the database and table have been created. +The configuration will not create the directories in the path; that must be done by the user (see the sketch below).
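+ +For example, assuming a database that should live at `/path/to/database` (the placeholder path reused in the examples below), the directory could be created ahead of time: + +```bash +# Create the target directory (and any missing parents) before referencing it in harperdb-config.yaml +mkdir -p /path/to/database +```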
+ +To define where a database and all its tables should reside, use the name of your database and the `path` parameter. + +```yaml +databases: + nameOfDatabase: + path: /path/to/database +``` + +To define where specific tables within a database should reside, use the name of your database, the `tables` parameter, the name of your table and the `path` parameter. + +```yaml +databases: + nameOfDatabase: + tables: + nameOfTable: + path: /path/to/table +``` + +This same pattern can be used to define where the audit log database files should reside. To do this use the `auditPath` parameter. + +```yaml +databases: + nameOfDatabase: + auditPath: /path/to/database +``` + +**Setting the database section through the command line, environment variables or API** + +When using command line variables, environment variables or the API to configure the databases section, a slightly different convention from the regular one should be used. To add one or more configurations, use a JSON object array. + +Using command line variables: + +```bash +--DATABASES [{\"nameOfSchema\":{\"tables\":{\"nameOfTable\":{\"path\":\"\/path\/to\/table\"}}}}] +``` + +Using environment variables: + +```bash +DATABASES=[{"nameOfSchema":{"tables":{"nameOfTable":{"path":"/path/to/table"}}}}] +``` + +Using the API: + +```json +{ + "operation": "set_configuration", + "databases": [{ + "nameOfDatabase": { + "tables": { + "nameOfTable": { + "path": "/path/to/table" + } + } + } + }] +} +``` + +*** + +### Components + +`<name>` - _Type_: string + +The name of the component. This will be used to name the folder where the component is installed and must be unique. + +`package` - _Type_: string + +A reference to your [component](../developers/components/installing) package. This could be a remote git repo, a local folder/file or an NPM package. +HarperDB will add this package to a package.json file and call `npm install` on it, so any reference that works with that paradigm will work here. + +Read more about npm install [here](https://docs.npmjs.com/cli/v8/commands/npm-install) + +`port` - _Type_: number; _Default_: whatever is set in `http.port` + +The port that your component should listen on. If no port is provided it will default to `http.port` + +```yaml +<name>: + package: 'HarperDB-Add-Ons/package-name' + port: 4321 +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/deployments/harperdb-cli.md b/site/versioned_docs/version-4.3/deployments/harperdb-cli.md new file mode 100644 index 00000000..804bc749 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/harperdb-cli.md @@ -0,0 +1,164 @@ +--- +title: HarperDB CLI +--- + +# HarperDB CLI + +The HarperDB command line interface (CLI) is used to administer [self-installed HarperDB instances](./install-harperdb/). + +## Installing HarperDB + +To install HarperDB with CLI prompts, run the following command: + +```bash +harperdb install +``` + +Alternatively, HarperDB installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](./configuration#using-the-configuration-file-and-naming-conventions). Note that when used in conjunction, command line arguments will override environment variables.
+ +#### Environment Variables + +```bash +#minimum required parameters for no additional CLI prompts +export TC_AGREEMENT=yes +export HDB_ADMIN_USERNAME=HDB_ADMIN +export HDB_ADMIN_PASSWORD=password +export ROOTPATH=/tmp/hdb/ +export OPERATIONSAPI_NETWORK_PORT=9925 +harperdb install +``` + +#### Command Line Arguments + +```bash +#minimum required parameters for no additional CLI prompts +harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925 +``` + +*** + +## Starting HarperDB + +To start HarperDB after it is installed, run the following command: + +```bash +harperdb start +``` + +*** + +## Stopping HarperDB + +To stop HarperDB once it is running, run the following command: + +```bash +harperdb stop +``` + +*** + +## Restarting HarperDB + +To restart HarperDB once it is running, run the following command: + +```bash +harperdb restart +``` +*** + +## Getting the HarperDB Version + +To check the version of HarperDB that is installed, run the following command: + +```bash +harperdb version +``` +*** + +## Renew self-signed certificates + +To renew the HarperDB generated self-signed certificates, run: + +```bash +harperdb renew-certs +``` + +*** + +## Copy a database with compaction + +To copy a HarperDB database with compaction (to eliminate free space and fragmentation), use: + +```bash +harperdb copy-db <source-database> <target-path> +``` +For example, to copy the default database: +```bash +harperdb copy-db data /home/user/hdb/database/copy.mdb +``` + + +*** + +## Get all available CLI commands + +To display all available HarperDB CLI commands along with a brief description, run: + +```bash +harperdb help +``` +*** + +## Get the status of HarperDB and clustering + +To display the status of the HarperDB process, the clustering hub and leaf processes, the clustering network and replication statuses, run: + +```bash +harperdb status +``` + +*** + +## Backups + +HarperDB uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that HarperDB maintains database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a HarperDB database. Database files are stored in the hdb/database directory. As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the database directory to restore a previous backup (with HarperDB shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot; this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use. + +*** + +# Operations API through the CLI + +Some of the API operations are available through the CLI; this includes most operations that do not require nested parameters. +To call an operation, use the following convention: `harperdb <operation> <parameter>=<value>`. +By default, the result will be formatted as YAML; if you would like the result in JSON, pass `json=true`.
+ +Some examples are: + +```bash +$ harperdb describe_table database=dev table=dog + +schema: dev +name: dog +hash_attribute: id +audit: true +schema_defined: false +attributes: + - attribute: id + is_primary_key: true + - attribute: name + indexed: true +clustering_stream_name: 3307bb542e0081253klnfd3f1cf551b +record_count: 10 +last_updated_record: 1724483231970.9949 +``` + +`harperdb set_configuration logging_level=error` + +`harperdb deploy_component project=my-cool-app package=https:/github.com/HarperDB/application-template` + +`harperdb get_components` + +`harperdb search_by_id database=dev table=dog ids='["1"]' get_attributes='["*"]' json=true` + +`harperdb search_by_value table=dog search_attribute=name search_value=harper get_attributes='["id", "name"]'` + +`harperdb sql sql='select * from dev.dog where id="1"'` diff --git a/site/versioned_docs/version-4.3/deployments/harperdb-cloud/alarms.md b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/alarms.md new file mode 100644 index 00000000..03526fa8 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/alarms.md @@ -0,0 +1,20 @@ +--- +title: Alarms +--- + +# Alarms + +HarperDB Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered organization owners will immediately receive an email alert and the alert will be available on the [Instance Configuration](../../administration/harperdb-studio/instance-configuration) page. The below table describes each alert and their evaluation metrics. + +### Heading Definitions + +* **Alarm**: Title of the alarm. +* **Threshold**: Definition of the alarm threshold. +* **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over. +* **Proposed Remedy**: Recommended solution to avoid the alert in the future. + +| Alarm | Threshold | Intervals | Proposed Remedy | +| ------- | ---------- | --------- | -------------------------------------------------------------------------------------------------------------------------------- | +| Storage | > 90% Disk | 1 x 5min | [Increased storage volume](../../administration/harperdb-studio/instance-configuration#update-instance-storage) | +| CPU | > 90% Avg | 2 x 5min | [Increase instance size for additional CPUs](../../administration/harperdb-studio/instance-configuration#update-instance-ram) | +| Memory | > 90% RAM | 2 x 5min | [Increase instance size](../../administration/harperdb-studio/instance-configuration#update-instance-ram) | diff --git a/site/versioned_docs/version-4.3/deployments/harperdb-cloud/index.md b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/index.md new file mode 100644 index 00000000..ae2ec1a7 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/index.md @@ -0,0 +1,9 @@ +--- +title: HarperDB Cloud +--- + +# HarperDB Cloud + +[HarperDB Cloud](https:/studio.harperdb.io/) is the easiest way to test drive HarperDB, it’s HarperDB-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. HarperDB Cloud is currently powered by AWS with additional cloud providers on our roadmap for the future. + +You can create a new [HarperDB Cloud instance in the HarperDB Studio](../../administration/harperdb-studio/instances#create-a-new-instance). 
diff --git a/site/versioned_docs/version-4.3/deployments/harperdb-cloud/instance-size-hardware-specs.md b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/instance-size-hardware-specs.md new file mode 100644 index 00000000..0e970b13 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/instance-size-hardware-specs.md @@ -0,0 +1,23 @@ +--- +title: Instance Size Hardware Specs +--- + +# Instance Size Hardware Specs + +While HarperDB Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*. + +| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor | +| --------------------- | --------- | ------- | -------------- | -------------------------------------- | +| t3.micro | 1 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.small | 2 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| t3.medium | 4 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 | +| m5.large | 8 | 2 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.xlarge | 16 | 4 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.2xlarge | 32 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.4xlarge | 64 | 16 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.8xlarge | 128 | 32 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.12xlarge | 192 | 48 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.16xlarge | 256 | 64 | 20 | Up to 3.1 GHz Intel Xeon Platinum 8000 | +| m5.24xlarge | 384 | 96 | 25 | Up to 3.1 GHz Intel Xeon Platinum 8000 | + +\*Specifications are subject to change. For the most up to date information, please refer to AWS documentation: [https:/aws.amazon.com/ec2/instance-types/](https:/aws.amazon.com/ec2/instance-types/). diff --git a/site/versioned_docs/version-4.3/deployments/harperdb-cloud/iops-impact.md b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/iops-impact.md new file mode 100644 index 00000000..1c8496d5 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/iops-impact.md @@ -0,0 +1,42 @@ +--- +title: IOPS Impact on Performance +--- + +# IOPS Impact on Performance + +HarperDB, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of server, virtual machine, or a container running HarperDB. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that HarperDB performs as expected. + +## IOPS Overview + +The primary measure of storage performance is the number of input/output operations per second (IOPS) that a storage device can perform. Different storage devices can have dramatically different performance profiles. A hard drive (HDD) might only perform a hundred or so IOPS, while a solid state drive (SSD) might be able to perform tens or hundreds of thousands of IOPS. + +Cloud providers like AWS, which powers HarperDB Cloud, don’t typically attach individual disks to a virtual machine or container. Instead, they combine large numbers of storage drives to create very high performance storage servers. Chunks (volumes) of that storage are then carved out and presented to many different virtual machines and containers. Due to the shared nature of this type of storage, the cloud provider places configurable limits on the number of IOPS that a volume can perform. 
The same way that cloud providers charge more for larger capacity volumes, they also charge more for volumes with more IOPS. + +## HarperDB Cloud Storage + +HarperDB Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price. + +AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS, as a result, all HarperDB Cloud storage options will offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future. + +You can read more about AWS EBS volume IOPS here: https:/docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html. + +## Estimating IOPS for HarperDB Instance + +The number of IOPS required for a particular workload is influenced by many factors. Testing your particular application is the best way to determine the number of IOPS required. A reliable method is to estimate about two IOPS for every index, including the primary key itself. So if a table has two indices besides primary key, estimate that an insert or update will require about six IOPS. Note that that can often be closer to one IOPS per index under load due to internal batching of writes, and sometimes even better when doing sequential inserts. Again it is best to test to verify this with application specific data and write patterns. + +For assistance in estimating IOPS requirements feel free to contact HarperDB Support or join our Community Slack Channel. + +## Example Use Case IOPS Requirements + +* **Sensor Data Collection** + + In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level. For example, if you are collecting 100 records per second you would expect to need roughly 3,000 IOPS just to handle the data inserts. +* **Data Analytics/BI Server** + + Providing a server for analytics purposes typically requires a larger machine. Typically these cases involve large scale SQL joins and aggregations, which puts a large strain on reads. HarperDB utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs to have high IOPS to meet performance demand. +* **Web Services** + + Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any HarperDB operation that requires a data scan will be IOPS intensive, but if these are not frequent then the EBS boost will suffice. Queries utilizing equals operations in either SQL or NoSQL do not require a scan due to HarperDB’s native indexing. +* **High Performance Database** + + Ultimately, if performance is your top priority, HarperDB should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements. 
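 + +As a rough worked example of the heuristic above (two IOPS per index, assuming no write batching — a hypothetical calculation; real workloads should be measured): + +``` +2 IOPS/index × 3 indices (primary key + 2 secondary) = ~6 IOPS per write +500 writes/sec × 6 IOPS/write = ~3,000 IOPS (the full gp3 baseline) +```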
diff --git a/site/versioned_docs/version-4.3/deployments/harperdb-cloud/verizon-5g-wavelength-instances.md b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/verizon-5g-wavelength-instances.md new file mode 100644 index 00000000..c5a565e9 --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/harperdb-cloud/verizon-5g-wavelength-instances.md @@ -0,0 +1,31 @@
---
title: Verizon 5G Wavelength
---

# Verizon 5G Wavelength

These instances are only accessible from the Verizon network. When accessing your HarperDB instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, and Verizon mobile devices.

HarperDB on Verizon 5G Wavelength brings HarperDB closer to the end user exclusively on the Verizon network, resulting in as little as single-digit millisecond response time from HarperDB to the client.

Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/).

## HarperDB 5G Wavelength Instance Specs

While HarperDB 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.

| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor |
| --------------------- | --------- | ------- | -------------- | ------------------------------------------- |
| t3.medium | 4 | 2 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor |
| t3.xlarge | 16 | 4 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor |
| r5.2xlarge | 64 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum Processor |

\*Specifications are subject to change. For the most up-to-date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/).

## HarperDB 5G Wavelength Storage

HarperDB 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp2) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.

AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS the volume can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time.

Smaller gp2 volumes are perfect for trying out the functionality of HarperDB, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger HarperDB volume. Learn more about the [impact of IOPS on performance here](./iops-impact).

You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#ebsvolumetypes_gp2). diff --git a/site/versioned_docs/version-4.3/deployments/install-harperdb/index.md b/site/versioned_docs/version-4.3/deployments/install-harperdb/index.md new file mode 100644 index 00000000..8e105aac --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/install-harperdb/index.md @@ -0,0 +1,61 @@
---
title: Install HarperDB
---

# Install HarperDB

This documentation contains information for installing HarperDB locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with HarperDB Cloud](https://studio.harperdb.io/sign-up).
HarperDB is a cross-platform database; we recommend Linux for production use, but HarperDB can run on Windows and Mac as well for development purposes. Installation is usually very simple and just takes a few steps, but there are a few different options documented here.

HarperDB runs on Node.js, so if you do not have it installed, you need to do that first (if you already have it installed, you can skip to installing HarperDB itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally, NVM can be installed with the following command:

```bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash
```

Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but we support all currently maintained Node versions (currently version 14 and newer; make sure to always use the latest minor/patch release for the major version):

```bash
nvm install --lts
```

#### Install and Start HarperDB

Then you can install HarperDB with NPM and start it:

```bash
npm install -g harperdb
harperdb
```

HarperDB will automatically start after installation. HarperDB's installation can be configured with numerous options via CLI arguments; for more information, visit the [HarperDB Command Line Interface](../harperdb-cli) guide.

If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system to use as a database server in our linux installation guide](./linux).

## With Docker

If you would like to run HarperDB in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server.

Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a HarperDB container.

## Offline Install

If you need to install HarperDB on a device that doesn't have an Internet connection, you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and NPM):

[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html)

Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it:

```bash
npm install -g harperdb-X.X.X.tgz
harperdb install
```

## Installation on Less Common Platforms

HarperDB comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon).
However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that build tools are installed so the installation process can compile the binaries (this is handled automatically), including:

* [Go](https://go.dev/dl/): version 1.19.1
* GCC
* Make
* Python v3.7, v3.8, v3.9, or v3.10 diff --git a/site/versioned_docs/version-4.3/deployments/install-harperdb/linux.md b/site/versioned_docs/version-4.3/deployments/install-harperdb/linux.md new file mode 100644 index 00000000..6cea34ad --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/install-harperdb/linux.md @@ -0,0 +1,223 @@
---
title: On Linux
---

# On Linux

If you wish to install locally or already have a configured server, see the basic [Installation Guide](./).

The following is a recommended way to configure Linux and install HarperDB. These instructions should work reasonably well for any public cloud or on-premises Linux instance.

***

These instructions assume that the following has already been completed:

1. Linux is installed
1. Basic networking is configured
1. A non-root user account dedicated to HarperDB with sudo privileges exists
1. An additional volume for storing HarperDB files is attached to the Linux instance
1. Traffic to ports 9925 (HarperDB Operations API), 9926 (HarperDB Application Interface), and 9932 (HarperDB Clustering) is permitted

While you will need to access HarperDB through port 9925 for administration through the operations API, and port 9932 for clustering, for a higher level of security you may want to consider keeping both of these ports restricted to a VPN or VPC, and only exposing the application interface (9926 by default) to the public Internet.

For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 Instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account.

***

### (Optional) LVM Configuration

Logical Volume Manager (LVM) can be used to stripe multiple disks together to form a single logical volume. If striping disks together is not a requirement, skip these steps.

Find the disk that already has a partition

```bash
used_disk=$(lsblk -P -I 259 | grep "nvme.n1.*part" | grep -o "nvme.n1")
```

Create an array of free disks

```bash
declare -a free_disks
mapfile -t free_disks < <(lsblk -P -I 259 | grep "nvme.n1.*disk" | grep -o "nvme.n1" | grep -v "$used_disk")
```

Get the quantity of free disks

```bash
free_disks_qty=${#free_disks[@]}
```

Construct the pvcreate command

```bash
cmd_string=""
for i in "${free_disks[@]}"
do
cmd_string="$cmd_string /dev/$i"
done
```

Initialize the disks for use by LVM

```bash
pvcreate_cmd="pvcreate $cmd_string"
sudo $pvcreate_cmd
```

Create the volume group

```bash
vgcreate_cmd="vgcreate hdb_vg $cmd_string"
sudo $vgcreate_cmd
```

Create the logical volume

```bash
sudo lvcreate -n hdb_lv -i $free_disks_qty -l 100%FREE hdb_vg
```

### Configure Data Volume

Run `lsblk` and note the device name of the additional volume

```bash
lsblk
```

Create an ext4 filesystem on the volume (the commands below assume the device name is nvme1n1.
If you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb_vg/hdb_lv.)

```bash
sudo mkfs.ext4 -L hdb_data /dev/nvme1n1
```

Mount the file system and set the correct permissions for the directory

```bash
mkdir /home/ubuntu/hdb
sudo mount -t ext4 /dev/nvme1n1 /home/ubuntu/hdb
sudo chown -R ubuntu:ubuntu /home/ubuntu/hdb
sudo chmod 775 /home/ubuntu/hdb
```

Create an fstab entry to mount the filesystem on boot

```bash
echo "LABEL=hdb_data /home/ubuntu/hdb ext4 defaults,noatime 0 1" | sudo tee -a /etc/fstab
```

### Configure Linux and Install Prerequisites

If a swap file or partition does not already exist, create and enable a 2GB swap file

```bash
sudo dd if=/dev/zero of=/swapfile bs=128M count=16
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
echo "/swapfile swap swap defaults 0 0" | sudo tee -a /etc/fstab
```

Increase the open file limits for the ubuntu user

```bash
echo "ubuntu soft nofile 500000" | sudo tee -a /etc/security/limits.conf
echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf
```

Install Node Version Manager (nvm)

```bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash
```

Load nvm (or log out and then log back in)

```bash
. ~/.nvm/nvm.sh
```

Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites))

```bash
nvm install --lts
```

### Install and Start HarperDB

Here is an example of installing HarperDB with minimal configuration.

```bash
npm install -g harperdb
harperdb start \
	--TC_AGREEMENT "yes" \
	--ROOTPATH "/home/ubuntu/hdb" \
	--OPERATIONSAPI_NETWORK_PORT "9925" \
	--HDB_ADMIN_USERNAME "HDB_ADMIN" \
	--HDB_ADMIN_PASSWORD "password"
```

Here is an example of installing HarperDB with commonly used additional configuration.

```bash
npm install -g harperdb
harperdb start \
	--TC_AGREEMENT "yes" \
	--ROOTPATH "/home/ubuntu/hdb" \
	--OPERATIONSAPI_NETWORK_PORT "9925" \
	--HDB_ADMIN_USERNAME "HDB_ADMIN" \
	--HDB_ADMIN_PASSWORD "password" \
	--HTTP_SECUREPORT "9926" \
	--CLUSTERING_ENABLED "true" \
	--CLUSTERING_USER "cluster_user" \
	--CLUSTERING_PASSWORD "password" \
	--CLUSTERING_NODENAME "hdb1"
```

To use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your [custom configuration file](../../deployments/configuration):
```bash
npm install -g harperdb
harperdb start \
	--TC_AGREEMENT "yes" \
	--HDB_ADMIN_USERNAME "HDB_ADMIN" \
	--HDB_ADMIN_PASSWORD "password" \
	--HDB_CONFIG "/path/to/your/custom/harperdb-config.yaml"
```

#### Start HarperDB on Boot
HarperDB will automatically start after installation.
If you wish HarperDB to start when the OS boots, you have two options:

You can set up a crontab:

```bash
(crontab -l 2>/dev/null; echo "@reboot PATH=\"/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH\" && harperdb start") | crontab -
```

Or you can create a systemd service file at `/etc/systemd/system/harperdb.service`, pasting the following contents into the file:

```
[Unit]
Description=HarperDB

[Service]
Type=simple
Restart=always
User=ubuntu
Group=ubuntu
WorkingDirectory=/home/ubuntu
ExecStart=/bin/bash -c 'PATH="/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH"; harperdb'

[Install]
WantedBy=multi-user.target
```

And then run the following:

```
sudo systemctl daemon-reload
sudo systemctl enable harperdb
```

For more information, visit the [HarperDB Command Line Interface guide](../../deployments/harperdb-cli) and the [HarperDB Configuration File guide](../../deployments/configuration). diff --git a/site/versioned_docs/version-4.3/deployments/upgrade-hdb-instance.md b/site/versioned_docs/version-4.3/deployments/upgrade-hdb-instance.md new file mode 100644 index 00000000..0b7c6e3f --- /dev/null +++ b/site/versioned_docs/version-4.3/deployments/upgrade-hdb-instance.md @@ -0,0 +1,90 @@
---
title: Upgrade a HarperDB Instance
---

# Upgrade a HarperDB Instance

This document describes best practices for upgrading self-hosted HarperDB instances. HarperDB can be upgraded using a combination of npm and built-in HarperDB upgrade scripts. Whenever upgrading your HarperDB installation, it is recommended that you make a backup of your data first. Note: This document applies to self-hosted HarperDB instances only. All [HarperDB Cloud instances](./harperdb-cloud/) will be upgraded by the HarperDB Cloud team.

## Upgrading

Upgrading HarperDB is a two-step process. First, the latest version of HarperDB must be downloaded from npm; then the HarperDB upgrade scripts will be utilized to ensure the newest features are available on the system.

1. Install the latest version of HarperDB using `npm install -g harperdb`.

   Note `-g` should only be used if you installed HarperDB globally (which is recommended).
1. Run `harperdb` to initiate the upgrade process.

   HarperDB will then prompt you for all appropriate inputs and then run the upgrade directives.

## Node Version Manager (nvm)

[Node Version Manager (nvm)](http://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found here: https://nvm.sh/.

HarperDB supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command:

```bash
nvm install <version>
```

To switch to a version of Node, run:

```bash
nvm use <version>
```

To see the current running version of Node, run:

```bash
node --version
```

With a handful of different versions of Node.js installed, run nvm with the `ls` argument to list out all installed versions:

```bash
nvm ls
```

When upgrading HarperDB, we recommend also upgrading your Node version.
Here we assume you're running on an older version of Node; the execution may look like this:

Switch to the older version of Node that HarperDB is running on (if it is not the current version):

```bash
nvm use 14.19.0
```

Make sure HarperDB is not running:

```bash
harperdb stop
```

Uninstall HarperDB. Note: this step is not required, but it will clean up old artifacts of HarperDB. We recommend removing all other HarperDB installations to ensure the most recent version is always running.

```bash
npm uninstall -g harperdb
```

Switch to the newer version of Node:

```bash
nvm use <version>
```

Install HarperDB globally

```bash
npm install -g harperdb
```

Run the upgrade script

```bash
harperdb
```

Start HarperDB

```bash
harperdb start
``` diff --git a/site/versioned_docs/version-4.3/developers/_category_.json b/site/versioned_docs/version-4.3/developers/_category_.json new file mode 100644 index 00000000..9fe399bf --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/_category_.json @@ -0,0 +1,12 @@
{
  "label": "Developers",
  "position": 1,
  "link": {
    "type": "generated-index",
    "title": "Developers Documentation",
    "description": "Comprehensive guides and references for building applications with HarperDB",
    "keywords": [
      "developers"
    ]
  }
} \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/developers/applications/caching.md b/site/versioned_docs/version-4.3/developers/applications/caching.md new file mode 100644 index 00000000..6ebad89a --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/applications/caching.md @@ -0,0 +1,288 @@
---
title: Caching
---

# Caching

HarperDB has integrated support for caching data from external sources. With built-in caching capabilities and distributed high-performance low-latency responsiveness, HarperDB makes an ideal data caching server. HarperDB can store cached data in standard tables, as queryable structured data, so data can easily be consumed in one format (for example, JSON or CSV) and provided to end users in different formats with different selected properties (for example, MessagePack with a subset of selected properties), or even with customized querying capabilities. HarperDB also manages and provides timestamps/tags for proper caching control, facilitating further downstream caching. With these combined capabilities, HarperDB is an extremely fast, interoperable, flexible, and customizable caching server.

## Configuring Caching

To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](./defining-schemas)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`:

```graphql
type MyCache @table(expiration: 3600) @export {
	id: ID @primaryKey
}
```

You may also note that we can define a time-to-live (TTL) expiration on the table, indicating when table records/entries should expire and be evicted from this table. This is generally necessary for "passive" caches where there is no active notification of when entries are invalidated or changed. However, this is not needed if you provide a means of notifying when data is invalidated and changed. The units for expiration, and other duration-based properties, are in seconds.
While you can provide a single expiration time, there are actually several expiration timings that are potentially relevant, and they can be independently configured. These settings are available as directive properties on the table configuration (like `expiration` above):
* stale expiration - The point when a request for a record should trigger a request to origin (but might possibly return the current stale record, depending on policy).
* must-revalidate expiration - The point when a request for a record must make a request to origin first and return the latest value from origin.
* eviction expiration - The point when a record is actually removed from the caching table.

You can provide a single expiration and it defines the behavior for all three. You can also provide three settings for expiration, through table directives:
* expiration - The amount of time until a record goes stale.
* eviction - The amount of time after expiration before a record can be evicted (defaults to zero).
* scanInterval - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction).

## Define External Data Source

Next, you need to define the source for your cache. External data sources could be HTTP APIs, other databases, microservices, or any other source of data. This can be defined as a resource class in your application's `resources.js` module. You can extend the `Resource` class (which is available as a global variable in the HarperDB environment) as your base class. The first method to implement is a `get()` method to define how to retrieve the source data. For example, if we were caching an external HTTP API, we might define it as such:

```javascript
class ThirdPartyAPI extends Resource {
	async get() {
		return (await fetch(`http://some-api.com/${this.getId()}`)).json();
	}
}
```

Next, we define this external data resource as the "source" for the caching table we defined above:

```javascript
const { MyCache } = tables;
MyCache.sourcedFrom(ThirdPartyAPI);
```

Now we have a fully configured and connected caching table. If you access data from `MyCache` (for example, through the REST API, like `/MyCache/some-id`), HarperDB will check to see if the requested entry is in the table and return it if it is available (and hasn't expired). If there is no entry, or it has expired (it is older than one hour in this case), it will go to the source, calling the `get()` method, which will then retrieve the requested entry. Once the entry is retrieved, it will be saved/cached in the caching table (for one hour based on our expiration time).

```mermaid
flowchart TD
    Client1(Client 1)-->Cache(Caching Table)
    Client2(Client 2)-->Cache
    Cache-->Resource(Data Source Connector)
    Resource-->API(Remote Data Source API)
```


HarperDB handles waiting for an existing cache resolution to finish and uses its result. This prevents a "cache stampede" when entries expire, ensuring that multiple requests for a cache entry will all wait on a single request to the data source.

Cache tables with an expiration are periodically pruned for expired entries. Because this is done periodically, there is usually some amount of time between when a record has expired and when the record is actually evicted (the cached data is removed). But when a record is checked for availability, the expiration time is used to determine if the record is fresh (and the cache entry can be used).
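As a quick sanity check that the cache is wired up, you can request a record through the REST interface and observe that only the first request (or the first request after expiration) reaches the origin. A minimal sketch, assuming a local instance with the HTTP interface on the default 9926 port and the exported `MyCache` table above:

```javascript
// First request for this id: the table is empty, so HarperDB calls the
// source's get() and caches the result for the configured expiration.
let response = await fetch('http://localhost:9926/MyCache/some-id');
console.log(await response.json());

// Second request within the expiration window: served directly from the
// caching table, without another call to the origin.
response = await fetch('http://localhost:9926/MyCache/some-id');
console.log(response.headers.get('ETag')); // timestamp-based tag for downstream caching
```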
### Eviction with Indexing

Eviction is the removal of a locally cached copy of data, but it does not imply the deletion of the actual data from the canonical or origin data source. Because evicted records still exist (just not in the local cache), if a caching table uses expiration (and eviction), and has indexing on certain attributes, the data is not removed from the indexes. The indexes that reference the evicted record are preserved, along with the attribute data necessary to maintain these indexes. Therefore, eviction means the removal of non-indexed data (in this case, evictions are stored as "partial" records). Eviction only removes the data that can be safely removed from a cache without affecting the integrity or behavior of the indexes. If a search query is performed that matches this evicted record, the record will be requested on-demand to fulfill the search query.

### Specifying a Timestamp

In the example above, we simply retrieved data to fulfill a cache request. We may want to supply the timestamp of the record we are fulfilling as well. This can be set on the context for the request:

```javascript
class ThirdPartyAPI extends Resource {
	async get() {
		let response = await fetch(`http://some-api.com/${this.getId()}`);
		this.getContext().lastModified = response.headers.get('Last-Modified');
		return response.json();
	}
}
```

#### Specifying an Expiration

In addition, we can also specify when a cached record "expires". When a cached record expires, this means that a request for that record will trigger a request to the data source again. This does not necessarily mean that the cached record has been evicted (removed), although expired records will be periodically evicted. If the cached record still exists, the data source can revalidate it and return it. For example:

```javascript
class ThirdPartyAPI extends Resource {
	async get() {
		const context = this.getContext();
		let headers = new Headers();
		if (context.replacingVersion) // this is the existing cached record
			headers.set('If-Modified-Since', new Date(context.replacingVersion).toUTCString());
		let response = await fetch(`http://some-api.com/${this.getId()}`, { headers });
		let cacheInfo = response.headers.get('Cache-Control');
		let maxAge = cacheInfo?.match(/max-age=(\d+)/)?.[1];
		if (maxAge) // we can set a specific expiration time by setting context.expiresAt
			context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time
		// we can just revalidate and return the record if the origin has confirmed that it has the same version:
		if (response.status === 304) return context.replacingRecord;
		...
```

## Active Caching and Invalidation

The cache we have created above is a "passive" cache; it only pulls data from the data source as needed, and has no knowledge of whether and when the data in the data source has actually changed, so it must rely on timer-based expiration to periodically retrieve possibly updated data. This means that it is possible that the cache may have stale data for a while (if the underlying data has changed, but the cached data hasn't expired), and the cache may have to refresh more than necessary if the data source data hasn't changed. Consequently, it can be significantly more effective to implement an "active" cache, in which the data source is monitored and notifies the cache when any data changes.
This ensures that when data changes, the cache can immediately load the updated data, and unchanged data can remain cached much longer (or indefinitely).

### Invalidate

One way to provide more active caching is to specifically invalidate individual records. Invalidation is useful when you know the source data has changed, and the cache needs to re-retrieve data from the source the next time that record is accessed. This can be done by executing the `invalidate()` method on a resource. For example, you could extend a table (in your resources.js) and provide a custom POST handler that does invalidation:

```javascript
const { MyTable } = tables;
export class MyTableEndpoint extends MyTable {
	async post(data) {
		if (data.invalidate) // use this flag as a marker
			this.invalidate();
	}
}
```

(Note that if you are now exporting this endpoint through resources.js, you don't necessarily need to directly export the table separately in your schema.graphql.)

### Subscriptions

We can provide more control of an active cache with subscriptions. If there is a way to receive notifications from the external data source of data changes, we can implement this data source as an "active" data source for our cache by implementing a `subscribe` method. A `subscribe` method should return an asynchronous iterable that iterates and returns events indicating the updates. One straightforward way of creating an asynchronous iterable is by defining the `subscribe` method as an asynchronous generator. If we had an endpoint that we could poll for changes every second, we could implement this like:

```javascript
class ThirdPartyAPI extends Resource {
	async *subscribe() {
		while (true) { // every second retrieve more data
			// get the next data change event from the source
			let update = await (await fetch(`http://some-api.com/latest-update`)).json();
			const event = { // define the change event (which will update the cache)
				type: 'put', // this would indicate that the event includes the new data value
				id: update.id, // the primary key of the record that updated
				value: update.value, // the new value of the record that updated
				timestamp: update.timestamp, // the timestamp of when the data change occurred
			};
			yield event; // this returns this event, notifying the cache of the change
			await new Promise((resolve) => setTimeout(resolve, 1000)); // wait a second before polling again
		}
	}
	async get() {
...
```

Notification events should always include an `id` property to indicate the primary key of the updated record. The event should have a `value` property for `put` and `message` event types. The `timestamp` is optional and can be used to indicate the exact timestamp of the change. The following event `type`s are supported:

* `put` - This indicates that the record has been updated and provides the new value of the record.
* `invalidate` - Alternately, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
* `delete` - This indicates that the record has been deleted.
* `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time).
* `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction.
These writes should be included as an array of data notification events in the `writes` property.

And the following properties can be defined on event objects:

* `type`: The event type as described above.
* `id`: The primary key of the record that updated.
* `value`: The new value of the record that updated (for `put` and `message` events).
* `writes`: An array of event properties that are part of a transaction (used in conjunction with the transaction event type).
* `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
* `timestamp`: The timestamp of when the data change occurred.

With an active external data source with a `subscribe` method, the data source will proactively notify the cache, ensuring a fresh and efficient active cache. Note that with an active data source, we still use the `sourcedFrom` method to register the source for a caching table, and the table will automatically detect and call the subscribe method on the data source.

By default, HarperDB will only run the subscribe method on one thread. HarperDB is multi-threaded and normally runs many concurrent worker threads, but running a subscription on multiple threads can typically introduce overlap in notifications and race conditions, so running a subscription on a single thread is preferable. However, if you want to enable subscribe on multiple threads, you can define a `static subscribeOnThisThread` method to specify if the subscription should run on the current thread:

```javascript
class ThirdPartyAPI extends Resource {
	static subscribeOnThisThread(threadIndex) {
		return threadIndex < 2; // run on two threads (the first two threads)
	}
	async *subscribe() {
		...
```

An alternative to using asynchronous generators is to use a subscription stream and send events to it. A default subscription stream (that doesn't generate its own events) is available from the Resource's default subscribe method:

```javascript
class ThirdPartyAPI extends Resource {
	subscribe() {
		const subscription = super.subscribe();
		setupListeningToRemoteService().on('update', (event) => {
			subscription.send(event);
		});
		return subscription;
	}
}
```

## Downstream Caching

It is highly recommended that you utilize the [REST interface](../rest) for accessing caching tables, as it facilitates downstream caching for clients. Timestamps are recorded with all cached entries. Timestamps are then used for incoming [REST requests to specify the `ETag` in the response](../rest#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data based on the timestamp/`ETag` of the entries that are cached in HarperDB. Caching tables also have [subscription capabilities](./caching#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of HarperDB, as either passive or active caches.

## Write-Through Caching

The cache we have defined so far only has data flowing from the data source to the cache. However, you may wish to support write methods, so that writes to the cache table can flow through to the underlying canonical data source, as well as populate the cache. This can be accomplished by implementing the standard write methods, like `put` and `delete`.
If you were using an API with standard RESTful methods, you can pass writes through to the data source like this:

```javascript
class ThirdPartyAPI extends Resource {
	async put(data) {
		await fetch(`http://some-api.com/${this.getId()}`, {
			method: 'PUT',
			body: JSON.stringify(data)
		});
	}
	async delete() {
		await fetch(`http://some-api.com/${this.getId()}`, {
			method: 'DELETE',
		});
	}
	...
```

When doing an insert or update to the MyCache table, the data will be sent to the underlying data source through the `put` method and the new record value will be stored in the cache as well.

### Loading from Source in Methods

When you are using a caching table, it is important to remember that any resource methods besides `get()` will not automatically load data from the source. If you have defined a `put()`, `post()`, or `delete()` method and you need the source data, you can ensure it is loaded by calling the `ensureLoaded()` method. For example, if you want to modify the existing record from the source, adding a property to it:

```javascript
class MyCache extends tables.MyCache {
	async post(data) {
		// if the data is not cached locally, retrieves from source:
		await this.ensureLoaded();
		// now we can be sure that the data is loaded, and can access properties
		this.quantity = this.quantity - data.purchases;
	}
}
```

### Subscribing to Caching Tables

You can subscribe to a caching table just like any other table. The one difference is that normal tables do not usually have `invalidate` events, but an active caching table may have `invalidate` events. Again, this event type gives listeners an opportunity to choose whether or not to actually retrieve the value that changed.

### Passive-Active Updates

With our passive update examples, we have provided a data source handler with a `get()` method that returns the specific requested record as the response. However, we can also actively update other records in our response handler (if our data source provides data that should be propagated to other related records). This can be done transactionally, to ensure that all updates occur atomically. The context that is provided to the data source holds the transaction information, so we can simply pass the context to any update/write methods that we call. For example, let's say we are loading a blog post, which should also include comment records:

```javascript
const { Post, Comment } = tables;
class BlogSource extends Resource {
	async get() {
		let post = await (await fetch(`http://my-blog-server/${this.getId()}`)).json();
		for (let comment of post.comments) {
			await Comment.put(comment, this); // save this comment as part of our current context and transaction
		}
		return post;
	}
}
Post.sourcedFrom(BlogSource);
```

Here both the update to the post and the update to the comments will be atomically/transactionally committed together with the same timestamp.

## Cache-Control Header

When interacting with cached data, you can also use the `Cache-Control` request header to specify certain caching behaviors. When performing a PUT (or POST) method, you can use the `max-age` directive to indicate how long the resource should be cached (until stale):

```http
PUT /my-resource/id
Cache-Control: max-age=86400
```

You can use the `only-if-cached` directive on GET requests to only return a resource if it is cached (otherwise it will return a 504).
Note that if the entry is not cached, this will still trigger a request for the source data from the data source. If you do not want source data retrieved, you can add the `no-store` directive. You can also use the `no-cache` directive if you do not want to use the cached resource. If you want to check whether there is a cached resource without triggering a request to the data source:

```http
GET /my-resource/id
Cache-Control: only-if-cached, no-store
```

You may also use the `stale-if-error` directive to indicate that it is acceptable to return a stale cached resource when the data source returns an error (network connection error, 500, 502, 503, or 504). The `must-revalidate` directive can indicate that a stale cached resource cannot be returned, even when the data source has an error (by default, a stale cached resource is returned when there is a network connection error).


## Caching Flow
It may be helpful to understand the flow of a cache request. When a request is made to a caching table:
* HarperDB will first create a resource instance to handle the process, and ensure that the data is loaded for the resource instance. To do this, it will first check if the record is in the table/cache.
   * If the record is not in the cache, HarperDB will first check if there is a current request to get the record from the source. If there is, HarperDB will wait for the request to complete and return the record from the cache.
   * If not, HarperDB will call the `get()` method on the source to retrieve the record. The record will then be stored in the cache.
   * If the record is in the cache, HarperDB will check if the record is stale. If the record is not stale, HarperDB will immediately return the record from the cache. If the record is stale, HarperDB will call the `get()` method on the source to retrieve the record.
   * The record will then be stored in the cache. This write of the record to the cache is done in a separate asynchronous/background write-behind transaction, so it does not block the current request, which returns the data immediately once it has it.
* The `get()` method will be called on the resource instance to return the record to the client (or perform any querying on the record). If this method is overridden, the override will be called at this time.

### Caching Flow with Write-Through
When writes are performed on a caching table (in a `put()` or `post()` method, for example), the flow is slightly different:
* HarperDB will have first created a resource instance to handle the process, and this resource instance will be the current `this` for a call to `put()` or `post()`.
* If a `put()` or `update()` is called, for example, this action will be recorded in the current transaction.
* Once the transaction is committed (which is done automatically as the request handler completes), the transaction write will be sent to the source to update the data.
   * The local writes will wait for the source to confirm the writes have completed (note that this effectively allows you to perform a two-phase transactional write to the source, and the source can confirm the writes have completed before the transaction is committed locally).
   * The transaction writes will then be written to the local caching table.
* The transaction handler will wait for the local commit to be written, then the transaction will be resolved and a response will be sent to the client.
diff --git a/site/versioned_docs/version-4.3/developers/applications/debugging.md b/site/versioned_docs/version-4.3/developers/applications/debugging.md new file mode 100644 index 00000000..ca03115f --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/applications/debugging.md @@ -0,0 +1,39 @@
---
title: Debugging Applications
---

# Debugging Applications

HarperDB components and applications run inside the HarperDB process, which is a standard Node.js process that can be debugged with standard JavaScript development tools like Chrome's devtools, VSCode, and WebStorm. Debugging can be performed by launching the HarperDB entry script with your IDE, or you can start HarperDB in dev mode and connect your debugger to the running process (defaults to the standard 9229 port):

```
harperdb dev
# or to run and debug a specific app
harperdb dev /path/to/app
```

Once you have connected a debugger, you may set breakpoints in your application and fully debug it. Note that when using the `dev` command from the CLI, this will run HarperDB in single-threaded mode. This would not be appropriate for production use, but makes it easier to debug applications.

For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use HarperDB's logging facilities, so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by HarperDB. This logger can be used to output messages directly to the HarperDB log using standardized logging level functions, described below. The log level can be set in the [HarperDB Configuration File](../../deployments/configuration).

HarperDB Logger Functions

* `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
* `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
* `info(message)`: Write an 'info' level log, if the configured level allows for it.
* `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
* `error(message)`: Write an 'error' level log, if the configured level allows for it.
* `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
* `notify(message)`: Write a 'notify' level log.

For example, you can log a warning:

```javascript
logger.warn('You have been warned');
```

If you want to ensure a message is logged, you can use `notify`, as these messages will appear in the log regardless of the configured log level.

## Viewing the Log

The HarperDB Log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the [Studio Status page](../../administration/harperdb-studio/instance-metrics). Additionally, you can use the [`read_log` operation](../operations-api/logs) to query the HarperDB log. diff --git a/site/versioned_docs/version-4.3/developers/applications/define-routes.md b/site/versioned_docs/version-4.3/developers/applications/define-routes.md new file mode 100644 index 00000000..6a9ed34b --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/applications/define-routes.md @@ -0,0 +1,118 @@
---
title: Define Fastify Routes
---

# Define Fastify Routes

HarperDB’s applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints.
While we generally recommend building your endpoints/APIs with HarperDB's [REST interface](../rest) for better performance and standards compliance, Fastify's routes can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.

The Fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):

```yaml
fastifyRoutes: # This loads files that define fastify routes using fastify's auto-loader
  files: routes/*.js # specify the location of route definition modules
  path: . # relative to the app-name, like http://server/app-name/route-name
```

By default, route URLs are configured to be:

* \[**Instance URL**]:\[**HTTP Port**]/\[**Project Name**]/\[**Route URL**]

However, you can specify the path to be `/` if you wish to have your routes handling the root path of incoming URLs.

* The route below, using the default config, within the **dogs** project, with a route of **breeds** would be available at **http://localhost:9926/dogs/breeds**.

In effect, this route is just a pass-through to HarperDB. The same result could have been achieved by hitting the core HarperDB API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.

```javascript
export default async (server, { hdbCore, logger }) => {
	server.route({
		url: '/',
		method: 'POST',
		preValidation: hdbCore.preValidation,
		handler: hdbCore.request,
	})
}
```

## Custom Handlers

For endpoints where you want to execute multiple operations against HarperDB, or perform additional processing (like an ML classification, or an aggregation, or a call to a 3rd party API), you can define your own logic in the handler. The function below will execute a query against the dogs table, and filter the results to only return those dogs over 4 years of age.

**IMPORTANT: This route has NO preValidation and uses hdbCore.requestWithoutAuthentication, which, as the name implies, bypasses all user authentication. See the security concerns and mitigations in the “helper methods” section, below.**

```javascript
export default async (server, { hdbCore, logger }) => {
	server.route({
		url: '/:id',
		method: 'GET',
		handler: async (request) => {
			request.body = {
				operation: 'sql',
				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`
			};

			const result = await hdbCore.requestWithoutAuthentication(request);
			return result.filter((dog) => dog.age > 4);
		}
	});
}
```

## Custom preValidation Hooks

The simple example above was just a pass-through to HarperDB; the exact same result could have been achieved by hitting the core HarperDB API. But for many applications, you may want to authenticate the user using custom logic you write, or by conferring with a 3rd party service. Custom preValidation hooks let you do just that.
Below is an example of a route that uses a custom validation hook:

```javascript
import customValidation from '../helpers/customValidation';

export default async (server, { hdbCore, logger }) => {
	server.route({
		url: '/:id',
		method: 'GET',
		preValidation: (request) => customValidation(request, logger),
		handler: (request) => {
			request.body = {
				operation: 'sql',
				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`
			};

			return hdbCore.requestWithoutAuthentication(request);
		}
	});
}
```

Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Define Helpers](#helper-methods).

## Helper Methods

When declaring routes, you are given access to two helper methods: hdbCore and logger.

**hdbCore**

hdbCore contains three functions that allow you to authenticate an inbound request, and execute operations against HarperDB directly, bypassing the standard Operations API endpoint.

* **preValidation**

  This is an array of functions used for fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard HarperDB Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**.
* **request**

  This will execute a request with HarperDB using the operations API. The `request.body` should contain a standard HarperDB operation and must also include the `hdb_user` property that was provided in `request.body` in the callback.
* **requestWithoutAuthentication**

  Executes a request against HarperDB without any security checks around whether the inbound user is allowed to make this request. For security purposes, you should always take the following precautions when using this method:

  * Properly handle user-submitted values, including URL params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.

**logger**

This helper allows you to write directly to the log file, hdb.log. It’s useful for debugging during development, although you may also use the console logger. There are five functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.

* logger.trace(‘Starting the handler for /dogs’)
* logger.debug(‘This should only fire once’)
* logger.warn(‘This should never ever fire’)
* logger.error(‘This did not go well’)
* logger.fatal(‘This did not go very well at all’) diff --git a/site/versioned_docs/version-4.3/developers/applications/defining-schemas.md b/site/versioned_docs/version-4.3/developers/applications/defining-schemas.md new file mode 100644 index 00000000..29a0c12d --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/applications/defining-schemas.md @@ -0,0 +1,161 @@
---
title: Defining Schemas
---

# Defining Schemas

Schemas define tables and their attributes. Schemas can be declaratively defined in HarperDB using GraphQL schema definitions. Schema definitions can be used to ensure that the tables required by applications exist and have the appropriate attributes.
Schemas can define the primary key, the data types of attributes, whether they are required, and which attributes should be indexed. The [introduction to applications](./) provides a helpful introduction to using schemas as part of database application development.

Schemas can be used to define the expected structure of data, but are also highly flexible, support heterogeneous data structures, and by default allow data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).

An example schema that defines a couple of tables might look like:

```graphql
# schema.graphql:
type Dog @table {
	id: ID @primaryKey
	name: String
	breed: String
	age: Int
}

type Breed @table {
	id: ID @primaryKey
}
```

In this example, you can see that we specified the expected data structure for records in the Dog and Breed tables. For example, this will enforce that Dog records are required to have a `name` property with a string (or null, unless the type is specified as non-nullable). This does not preclude records from having additional properties (see `@sealed` for preventing additional properties). For example, some Dog records could also optionally include a `favoriteTrick` property.

On this page, we will describe the specific directives that HarperDB uses for defining tables and attributes in a schema.

### Type Directives

#### `@table`

Table schemas are defined using GraphQL type definitions with a `@table` directive:

```graphql
type TableName @table
```

By default, the table name is inherited from the type name (in this case the table name would be "TableName"). The `@table` directive supports several optional arguments (all of these are optional and can be freely combined):

* `@table(table: "table_name")` - This allows you to explicitly specify the table name.
* `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database.
* `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds.
* `@table(audit: true)` - This enables the audit log for the table so that a history of record changes is recorded. This defaults to the [configuration file's setting for `auditLog`](../../deployments/configuration#logging).

#### `@export`

This indicates that the specified table should be exported as a resource that is accessible as an externally available endpoint, through REST, MQTT, or any of the external resource APIs.

This directive also accepts a `name` parameter to specify the name that should be used for the exported resource (how it will appear in the URL path). For example:

```
type MyTable @table @export(name: "my-table")
```

This table would be available at the URL path `/my-table/`. Without the `name` parameter, the exported name defaults to the name of the table type ("MyTable" in this example).

### Relationships: `@relationship`
Defining relationships is the foundation of using "join" queries in HarperDB. A relationship defines how one table relates to another table using a foreign key. Using the `@relationship` directive will define a property as a computed property, which resolves to a record/instance of a target type based on the referenced attribute, which can be in this table or the target table.
The `@relationship` directive must be used in combination with an attribute with a type that references another table.

#### `@relationship(from: attribute)`
This defines a relationship where the foreign key is defined in this table, and relates to the primary key of the target table. If the foreign key is single-valued, this establishes a many-to-one relationship with the target table. The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship.
For example, we can define a foreign key that references another table and then define the relationship. Here we create a `brandId` attribute that will be our foreign key (it will hold an id that references the primary key of the Brand table), and we define a relationship to the `Brand` table through the `brand` attribute:
```graphql
type Product @table @export {
	id: ID @primaryKey
	brandId: ID @indexed
	brand: Brand @relationship(from: brandId)
}
type Brand @table @export {
	id: ID @primaryKey
}
```
Once this is defined, we can use the `brand` attribute as a [property in our product instances](../../technical-details/reference/resource) and allow for querying by `brand` and selecting brand attributes as returned properties in [query results](../rest).

Again, the foreign key may be a multi-valued array (an array of keys referencing the target table records). For example, if we had a list of features that reference a Feature table:

```graphql
type Product @table @export {
	id: ID @primaryKey
	featureIds: [ID] @indexed # array of ids
	features: [Feature] @relationship(from: featureIds) # array of referenced feature records
}
type Feature @table {
	id: ID @primaryKey
	...
}
```

#### `@relationship(to: attribute)`
This defines a relationship where the foreign key is defined in the target table and relates to the primary key of this table. If the foreign key is single-valued, this establishes a one-to-many relationship with the target table. Note that the target table type must be an array element type (like `[Table]`). The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship.
For example, we can define a reciprocal relationship from the example above, adding a relationship from brand back to product. Here we continue to use the `brandId` attribute from the `Product` schema, and we define a relationship to the `Product` table through the `products` attribute:
```graphql
type Brand @table @export {
	id: ID @primaryKey
	name: String
	products: [Product] @relationship(to: brandId)
}
```
Once this is defined, we can use the `products` attribute as a property in our brand instances and allow for querying by `products` and selecting product attributes as returned properties in query results.

Note that schemas can also reference themselves with relationships, allowing records to define parent-child relationships between records in the same table.

#### `@sealed`

The `@sealed` directive specifies that no additional properties should be allowed on records besides those specified in the type itself.

### Field Directives

Field directives can be used to provide information about each attribute in a table type definition.

#### `@primaryKey`

The `@primaryKey` directive specifies that an attribute is the primary key for a table. Primary keys must be unique; when records are created, the primary key will be auto-generated as a UUID if none is provided.

#### `@indexed`

The `@indexed` directive specifies that an attribute should be indexed. This is necessary if you want to execute queries using this attribute (whether that is through RESTful query parameters, SQL, or NoSQL operations).

#### `@createdTime`

The `@createdTime` directive indicates that this property should be assigned a timestamp of the creation time of the record (in epoch milliseconds).

#### `@updatedTime`

The `@updatedTime` directive indicates that this property should be updated with a timestamp each time the record is updated (in epoch milliseconds).

### Defined vs Dynamic Schemas

If you do not define a schema for a table and instead create the table through the operations API (without specifying attributes) or the studio, the table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema). It is generally best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.

### Field Types

HarperDB supports the following field types in addition to user-defined (object) types:

* `String`: String/text.
* `Int`: A 32-bit signed integer (from -2147483648 to 2147483647).
* `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992).
* `Float`: Any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). Note that all numbers are stored in the most compact representation available.
* `BigInt`: Any integer (negative or positive) with fewer than 300 digits. (Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately.)
* `Boolean`: true or false.
* `ID`: A string (but indicates it is not intended to be human readable).
* `Any`: Any primitive, object, or array is allowed.
* `Date`: A Date object.
* `Bytes`: Binary data (as a Buffer or Uint8Array).

#### Renaming Tables

It is important to note that HarperDB does not currently support renaming tables. If you change the name of a table in your schema definition, this will result in the creation of a new, empty table.

### OpenAPI Specification

_The [OpenAPI Specification](https://spec.openapis.org/oas/v3.1.0) defines a standard, programming language-agnostic interface description for HTTP APIs, which allows both humans and computers to discover and understand the capabilities of a service without requiring access to source code, additional documentation, or inspection of network traffic._

If a set of endpoints is configured through a HarperDB GraphQL schema, those endpoints can be described by using a default REST endpoint called `GET /openapi`.
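For example, you could fetch the generated description with a simple request (a sketch; the host and port assume a default local installation with the default HTTP configuration):

```shell
curl http://localhost:9926/openapi
```
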
_Note: The `/openapi` endpoint should only be used as a starting guide; it may not cover all the elements of an endpoint._
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/developers/applications/example-projects.md b/site/versioned_docs/version-4.3/developers/applications/example-projects.md
new file mode 100644
index 00000000..2eb92ba4
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/applications/example-projects.md
@@ -0,0 +1,37 @@
---
title: Example Projects
---

# Example Projects

**Library of example HarperDB applications and components:**

* [Authorization in HarperDB using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang

* [How to Speed Up your Applications by Caching at the Edge with HarperDB](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams

* [OAuth Authentication in HarperDB using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos

* [How To Create a CRUD API with Next.js & HarperDB Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock

* [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush

* [How to use HarperDB Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden

* [Using TensorFlowJS & HarperDB Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft

* [Build & Deploy a Fitness App with Python & HarperDB](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber

* [Create a Discord Slash Bot using HarperDB Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty

* [How I used HarperDB Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj

* [How I used HarperDB Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary

* [How To Use HarperDB Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi

* [Build a Web App Using HarperDB’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp

* [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David

* [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select* Podcast
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/developers/applications/index.md b/site/versioned_docs/version-4.3/developers/applications/index.md
new file mode 100644
index 00000000..7fee6bd8
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/applications/index.md
@@ -0,0 +1,376 @@
---
title: Applications
---

# Applications

## Overview of HarperDB Applications

HarperDB is more than a database; it's a distributed clustering platform allowing you
to package your schema, endpoints and application logic and deploy them to an entire fleet of HarperDB instances optimized for on-the-edge scalable data delivery.

In this guide, we are going to explore the highly extensible architecture that HarperDB provides by building a HarperDB component, a fundamental building block of the HarperDB ecosystem.

When working through this guide, we recommend you use the [HarperDB Application Template](https://github.com/HarperDB/application-template) repo as a reference.

## Understanding the Component Application Architecture

HarperDB provides several types of components. Any package that is added to HarperDB is called a "component", and components are generally categorized as either "applications", which deliver a set of endpoints for users, or "extensions", which are building blocks for features like authentication, additional protocols, and connectors that can be used by other components. Components can be added to the `hdb/components` directory and will be loaded by HarperDB when it starts. Components that are remotely deployed to HarperDB (through the studio or the operations API) are installed into the `hdb/node_modules` directory. Using `harperdb run .` or `harperdb dev .` allows us to specifically load a certain application in addition to any that have been manually added to `hdb/components` or installed (in `node_modules`).

```mermaid
flowchart LR
	Client(Client)-->Endpoints
	Client(Client)-->HTTP
	Client(Client)-->Extensions
	subgraph HarperDB
	direction TB
	Applications(Applications)-- "Schemas" --> Tables[(Tables)]
	Applications-->Endpoints[/Custom Endpoints/]
	Applications-->Extensions
	Endpoints-->Tables
	HTTP[/REST/HTTP/]-->Tables
	Extensions[/Extensions/]-->Tables
	end
```

## Getting up and Running

### Prerequisites

We assume you are running HarperDB version 4.2 or greater, which supports the HarperDB application architecture (in previous versions, this functionality was provided by 'custom functions').

### Scaffolding our Application Directory

Let's create and initialize a new directory for our application. It is recommended that you start by using the [HarperDB application template](https://github.com/HarperDB/application-template). Assuming you have `git` installed, you can create your project directory by cloning:

```shell
> git clone https://github.com/HarperDB/application-template my-app
> cd my-app
```

You can also start with an empty application directory if you'd prefer.

To create your own application from scratch, you may want to initialize it as an npm package with the `type` field set to `module` in the `package.json` so that you can use the ECMAScript module syntax used in this tutorial:

```shell
> mkdir my-app
> cd my-app
> npm init -y esnext
```

If you want to version control your application code, you can adjust the remote URL to your repository.

Here's an example for a GitHub repo:

```shell
> git remote set-url origin git@github.com:your-account/your-repo.git
```

Locally developing your application and then committing your app to source control is a great way to manage your code and configuration, and from there you can [directly deploy from your repository](./#deploying-your-application).

## Creating our first Table

The core of a HarperDB application is the database, so let's create a database table!

A quick and expressive way to define a table is through a [GraphQL Schema](https://graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we will need to add a type named `Dog` with a `@table` directive (and you can remove the example table in the template):

```graphql
type Dog @table {
	# properties will go here soon
}
```

And then we'll add a primary key named `id` of type `ID`:

_(Note: A GraphQL schema is a fast method to define tables in HarperDB, but you are by no means required to use GraphQL to query your application, nor should you necessarily do so)_

```graphql
type Dog @table {
	id: ID @primaryKey
}
```

Now we tell HarperDB to run this as an application:

```shell
> harperdb dev . # tell the HarperDB CLI to run the current directory as an application in dev mode
```

HarperDB will now create the `Dog` table and the `id` attribute we just defined. Not only is this an easy way to create a table, but this schema is included in our application, which will ensure that this table exists wherever we deploy this application (to any HarperDB instance).

## Adding Attributes to our Table

Next, let's expand our `Dog` table by adding additional typed attributes for dog `name`, `breed` and `age`.

```graphql
type Dog @table {
	id: ID @primaryKey
	name: String
	breed: String
	age: Int
}
```

This will ensure that new records must have these properties with these types.

Because we ran `harperdb dev .` earlier (dev mode), HarperDB is now monitoring the contents of our application directory for changes and reloading when they occur. This means that once we save our schema file with these new attributes, HarperDB will automatically reload our application, read `my-app/schema.graphql` and update the `Dog` table and attributes we just defined. The dev mode will also ensure that any logging or errors are immediately displayed in the console (rather than only in the log file).

As a NoSQL database, HarperDB supports heterogeneous records (also referred to as documents), so you can freely specify additional properties on any record. If you do want to restrict the records to only the defined properties, you can do so by adding the `@sealed` directive:

```graphql
type Dog @table @sealed {
	id: ID @primaryKey
	name: String
	breed: String
	age: Int
	tricks: [String]
}
```

If you are using HarperDB Studio, you can now [add JSON-formatted records](../../administration/harperdb-studio/manage-databases-browse-data#add-a-record) to this new table in the studio or upload data as [CSV from a local file or URL](../../administration/harperdb-studio/manage-databases-browse-data#load-csv-data). A third, more advanced, way to add data to your database is to use the [operations API](../operations-api/), which provides full administrative control over your new HarperDB instance and tables.

## Adding an Endpoint

Now that we have a running application with a database (with data if you imported any), let's make this data accessible from a RESTful URL by adding an endpoint.
To do this, we simply add the `@export` directive to our `Dog` table:

```graphql
type Dog @table @export {
	id: ID @primaryKey
	name: String
	breed: String
	age: Int
	tricks: [String]
}
```

By default, the application HTTP server port is `9926` (this can be [configured here](../../deployments/configuration#http)), so the local URL would be [http://localhost:9926/Dog/](http://localhost:9926/Dog/) with a full REST API. We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, you can use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id:

```http
POST /Dog/
Content-Type: application/json

{
	"name": "Harper",
	"breed": "Labrador",
	"age": 3,
	"tricks": ["sits"]
}
```

With this, a record will be created and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/<id>` to view that record. Alternatively, the curl command `curl http://localhost:9926/Dog/<id>` will achieve the same thing.

## Authenticating Endpoints

These endpoints automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../security/) for more information on different levels of access.

By default, HarperDB also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make it simple to interact for local development. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../../deployments/configuration#authentication).

### Content Negotiation

These endpoints support various content types, including `JSON`, `CBOR`, `MessagePack` and `CSV`. Simply include an `Accept` header in your requests with the preferred content type. We recommend `CBOR` as a compact, efficient encoding with rich data types, but `JSON` is familiar and great for web application development, and `CSV` can be useful for exporting data to spreadsheets or other processing.

HarperDB works with other important standard HTTP headers as well, and these endpoints are even capable of caching interactions:

```
Authorization: Basic
Accept: application/cbor
If-None-Match: "etag-id" # browsers can automatically provide this
```

## Querying

Querying your application database is straightforward and easy, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../rest). Simple queries can be crafted through [URL query parameters](https://en.wikipedia.org/wiki/Query_string).

In order to maintain reasonable query speed on a database as it grows in size, it is critical to select and establish the proper indexes. So, before we add the `@export` declaration to our `Dog` table and begin querying it, let's take a moment to target some table properties for indexing. We'll use `name` and `breed` as indexed table properties on our `Dog` table.
All we need to do to accomplish this is tag these properties with the `@indexed` directive:

```graphql
type Dog @table {
	id: ID @primaryKey
	name: String @indexed
	breed: String @indexed
	owner: String
	age: Int
	tricks: [String]
}
```

And finally, we'll add the `@export` directive to expose the table as a RESTful endpoint:

```graphql
type Dog @table @export {
	id: ID @primaryKey
	name: String @indexed
	breed: String @indexed
	owner: String
	age: Int
	tricks: [String]
}
```

Now we can start querying. Again, we simply access the endpoint with query parameters (basic GET requests), like:

```
http://localhost:9926/Dog/?name=Harper
http://localhost:9926/Dog/?breed=Labrador
http://localhost:9926/Dog/?breed=Husky&name=Balto&select=id,name,breed
```

Congratulations, you have now created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../rest) and the [Schema reference](./defining-schemas) for more options for defining schemas.

## Deploying your Application

This guide assumes that you're building a HarperDB application locally. If you have a cloud instance available, you can deploy it by doing the following:

* Commit and push your application component directory code (i.e., the `my-app` directory) to a GitHub repo. In this tutorial we started with a clone of the application-template. To commit and push to your own repository, change the origin to your repo: `git remote set-url origin git@github.com:your-account/your-repo.git`
* Go to the applications section of your target cloud instance in the [HarperDB Studio](../../administration/harperdb-studio/manage-applications).
* In the left-hand menu of the applications IDE, click 'deploy' and specify a package location reference that follows the [npm package specification](https://docs.npmjs.com/cli/v8/using-npm/package-spec) (i.e., a string like `HarperDB/Application-Template` or a URL like `https://github.com/HarperDB/application-template` that npm knows how to install).

You can also deploy your application from your repository directly by using the [`deploy_component` operation](../operations-api/components#deploy-component).

Once you have deployed your application to a HarperDB cloud instance, you can start scaling your application by adding additional instances in other regions.

With a global traffic manager/load balancer configured, you can distribute incoming requests to the appropriate server. You can deploy and re-deploy your application to all the nodes in your mesh.

Now, with an application that you can deploy, update, and re-deploy, you have an application that is horizontally and globally scalable!

## Custom Functionality with JavaScript

So far we have built an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. This gives you tremendous flexibility and control over how data is accessed and modified in HarperDB. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality. Let's add a property to the dog records, when they are returned, that includes their age in human years.
In HarperDB, data is accessed through our [Resource API](../../technical-details/reference/resource), a standard interface for accessing data sources and tables and making them available to endpoints. Database tables are `Resource` classes, so extending the functionality of a table is as simple as extending its class.

To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder). Endpoints can then be defined with Resource classes that are `export`ed. This can be done in addition to, or in lieu of, the `@export`ed types in `schema.graphql`. If you are exporting and extending a table you defined in the schema, make sure you remove the `@export` from the schema so that you don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put`, to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it:

```javascript
// resources.js:
const { Dog } = tables; // get the Dog table from the HarperDB provided set of tables (in the default database)

export class DogWithHumanAge extends Dog {
	get(query) {
		this.humanAge = 15 + this.age * 5; // silly calculation of human age equivalent
		return super.get(query);
	}
}
```

Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, we now have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute, so accessing instance properties like `age` will get the value from the underlying record. The instance holds information about the primary key of the record so updates and actions can be applied to the correct record, and changed or newly assigned properties can be saved or included in the resource as it is returned and serialized. The `return super.get(query)` call at the end allows any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest#select-properties)).

Often we may want to incorporate data from other tables or data sources in our data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql):

```graphql
type Breed @table {
	name: String @primaryKey
	description: String @indexed
	lifespan: Int
	averageWeight: Float
}
```

And next we will use this table in our `get()` method. We will call the new table's (static) `get()` method to retrieve a breed by id. To do this correctly, we access the table using our current context by passing in `this` as the second argument. This is important because it ensures that we are accessing the data atomically, in a consistent snapshot across tables. This provides automatic tracking of most-recently-updated timestamps across resources for caching purposes.
It also allows for sharing of contextual metadata (like the user who requested the data) and ensures transactional atomicity for any writes (not needed in this get operation, but important for other operations). The resource methods are automatically wrapped with a transaction (which will commit/finish when the method completes), and this allows us to fully utilize multiple resources in our current transaction. With our own snapshot of the database for the Dog and Breed tables, we can then access data like this:

```javascript
// resources.js:
const { Dog, Breed } = tables; // get the Breed table too
export class DogWithBreed extends Dog {
	async get(query) {
		let breedDescription = await Breed.get(this.breed, this);
		this.breedDescription = breedDescription;
		return super.get(query);
	}
}
```

The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified by the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance.

Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While the HTTP PUT method has a specific semantic definition (replace the current record), a common way to implement custom actions is through the HTTP POST method. The POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's `post()` method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array on a specific instance. We might do it like this, specifying an action so we can differentiate between actions:

```javascript
export class CustomDog extends Dog {
	async post(data) {
		if (data.action === 'add-trick')
			this.tricks.push(data.trick);
	}
}
```

A POST request to `/CustomDog/<dog-id>` would call this `post` method. The Resource class then automatically tracks changes you make to your resource instances and saves those changes when the transaction is committed (again, these methods are automatically wrapped in a transaction and committed once the request handler is finished). So when you push data onto the `tricks` array, this will be recorded and persisted when this method finishes and before a response is sent to the client.

The `post` method automatically marks the current instance as being updated. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like the `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction).

We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post` or `put` method to do this, but we may want to separate the logic so these methods can be called separately without authorization checks. The [Resource API](../../technical-details/reference/resource) defines `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods to easily configure individual capabilities. For example, we might do this:

```javascript
export class CustomDog extends Dog {
	allowUpdate(user) {
		return this.owner === user.username;
	}
}
```

Any methods that are not defined will fall back to HarperDB's default authorization procedure, based on users' roles.
If you are using/extending a table, this is based on HarperDB's [role-based access](../security/users-and-roles). If you are extending the base `Resource` class, the default access requires superuser permission.

You can also use the `default` export to define the root path resource handler. For example:

```javascript
// resources.js
export default class CustomDog extends Dog {
	...
```

This will allow requests to a URL like `/` to be directly resolved to this resource.

## Define Custom Data Sources

We can also directly implement the `Resource` class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like:

```javascript
const { Breed } = tables; // our Breed table
class BreedSource extends Resource { // define a data source
	async get() {
		return (await fetch(`http://best-dog-site.com/${this.getId()}`)).json();
	}
}
// define that our breed table is a cache of data from the data source above, with a specified expiration
Breed.sourcedFrom(BreedSource, { expiration: 3600 });
```

The [caching documentation](./caching) provides much more information on how to use HarperDB's powerful caching capabilities and set up data sources.

HarperDB provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals) and the [Resource interface](../../technical-details/reference/resource).

## Configuring Applications/Components

Every application or component can define its own configuration in a `config.yaml`. If you are using the application template, you will have a [default configuration in this config file](https://github.com/HarperDB/application-template/blob/main/config.yaml) (which is the default configuration applied if no config file is provided). Within the config file, you can configure how different files and resources are loaded and handled. The default configuration file itself is documented with directions. Each entry can specify any `files` that the loader will handle, and can also optionally specify what, if any, URL `path`s it will handle. A path of `/` means that root URLs are handled by the loader, and a path of `.` indicates that URLs starting with this application's name are handled.

This config file also allows you to define a location for static files (which are delivered as-is in response to incoming HTTP requests).

Each configuration entry can have the following properties, in addition to properties that may be specific to the individual component (a sketch follows this list):

* `files`: This specifies the set of files that should be handled by the component. This is a glob pattern, so a set of files can be specified like "directory/**".
* `path`: This is the URL path that is handled by this component.
* `root`: This specifies the root directory for mapping file paths to URLs. For example, if you want all the files in `web/**` to be available in the root URL path via the static handler, you could specify a root of `web` to indicate that the web directory maps to the root URL path.
* `package`: This is used to specify that this component is a third-party package, and can be loaded from the specified package reference (which can be an npm package, GitHub reference, URL, etc.).
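For example, a minimal `config.yaml` sketch using these properties. The entry names here (`graphqlSchema`, `jsResource`, `static`) follow the application template's defaults from memory; check your template's own config file for the authoritative names:

```yaml
# config.yaml (illustrative sketch)
graphqlSchema:
  files: '*.graphql' # load schema definitions from the project root
jsResource:
  files: 'resources.js' # load custom resource classes
static:
  files: 'web/**' # serve everything under web/
  root: 'web' # web/index.html maps to /index.html
  path: '/' # mount at the root URL path
```
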
## Define Fastify Routes

Exporting resources will generate full RESTful endpoints, but you may prefer to define endpoints through a framework. HarperDB includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins that provides sophisticated route definition capabilities.

By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which allows these modules to export a function that defines Fastify routes. See the [defining routes documentation](./define-routes) for more information on how to create Fastify routes.

However, Fastify is not as fast as HarperDB's RESTful endpoints (about 10%-20% slower/more overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so HarperDB's REST interface is generally recommended for optimum performance and ease of use.

## Restarting Your Instance

Generally, HarperDB will auto-detect when files change and auto-restart the appropriate threads. However, if there are changes that aren't detected, you may manually restart with the `restart_service` operation:

```json
{
	"operation": "restart_service",
	"service": "http_workers"
}
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/certificate-management.md b/site/versioned_docs/version-4.3/developers/clustering/certificate-management.md
new file mode 100644
index 00000000..58243cb7
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/certificate-management.md
@@ -0,0 +1,70 @@
---
title: Certificate Management
---

# Certificate Management

## Development

Out of the box, HarperDB generates certificates that are used when HarperDB nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the HarperDB node, the following settings (see the full [configuration file](../../deployments/configuration) docs for more details) are defaulted and recommended for ease of development:

```
clustering:
  tls:
    certificate: ~/hdb/keys/certificate.pem
    certificateAuthority: ~/hdb/keys/ca.pem
    privateKey: ~/hdb/keys/privateKey.pem
    insecure: true
    verify: true
```

The certificates that HarperDB generates are stored in your `/keys/` directory.

`insecure` is set to `true` to accept the certificate CN mismatch due to development certificates.

`verify` is set to `true` to enable mutual TLS between the nodes.

## Production

In a production environment, we recommend using your own certificate authority (CA), or a public CA such as LetsEncrypt, to generate certs for your HarperDB cluster. This will let you generate certificates with CNs that match the FQDNs of your nodes.

Once you generate new certificates, you can make HarperDB start using them by either replacing the generated files with your own or updating the configuration to point to your new certificates, and then restarting HarperDB.

Since these new certificates can be issued with correct CNs, you should set `insecure` to `false` so that nodes will do full validation of the certificates of the other nodes.
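For example, a production configuration might look like this (a sketch reusing the configuration keys shown above; the file paths and hostnames are hypothetical):

```
clustering:
  tls:
    certificate: /etc/harperdb/certs/node1.example.com.pem
    certificateAuthority: /etc/harperdb/certs/ca-chain.pem
    privateKey: /etc/harperdb/certs/node1.example.com.key
    insecure: false
    verify: true
```
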
### Certificate Requirements

* Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication`, as these certificates will be used both to accept connections from other HarperDB nodes and to make requests to other HarperDB nodes. Example:

```
X509v3 Key Usage: critical
    Digital Signature, Key Encipherment
X509v3 Extended Key Usage:
    TLS Web Server Authentication, TLS Web Client Authentication
```

* If you are using an intermediate CA to issue the certificates, the entire certificate chain (up to the root CA) must be included in the `certificateAuthority` file.
* If your certificates expire, you will need a way to issue new certificates to the nodes and then restart HarperDB. If you are using a public CA such as LetsEncrypt, a tool like `certbot` can be used to renew certificates.

### Certificate Troubleshooting

If you are having TLS issues with clustering, use the following steps to verify that your certificates are valid.

1. Make sure certificates can be parsed and that you can view the contents:

```
openssl x509 -in <certificate>.pem -noout -text
```

1. Make sure the certificate validates with the CA:

```
openssl verify -CAfile <ca>.pem <certificate>.pem
```

1. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match:

```
openssl rsa -modulus -noout -in <private-key>.pem | openssl md5
openssl x509 -modulus -noout -in <certificate>.pem | openssl md5
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/creating-a-cluster-user.md b/site/versioned_docs/version-4.3/developers/clustering/creating-a-cluster-user.md
new file mode 100644
index 00000000..3edecd29
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/creating-a-cluster-user.md
@@ -0,0 +1,59 @@
---
title: Creating a Cluster User
---

# Creating a Cluster User

Inter-node authentication takes place via HarperDB users. There is a special role type called `cluster_user` that exists by default and limits the user to only clustering functionality.

A `cluster_user` must be created and added to the `harperdb-config.yaml` file for clustering to be enabled.

All nodes that are intended to be clustered together need to share the same `cluster_user` credentials (i.e. username and password).

There are multiple ways a `cluster_user` can be created:

1. Through the operations API by calling `add_user`:

```json
{
	"operation": "add_user",
	"role": "cluster_user",
	"username": "cluster_account",
	"password": "letsCluster123!",
	"active": true
}
```

When using the API to create a cluster user, the `harperdb-config.yaml` file must be updated with the username of the new cluster user.

This can be done through the API by calling `set_configuration` or by editing the `harperdb-config.yaml` file.

```json
{
	"operation": "set_configuration",
	"clustering_user": "cluster_account"
}
```

In the `harperdb-config.yaml` file, under the top-level `clustering` element, there will be a `user` element. Set this to the name of the cluster user.

```yaml
clustering:
  user: cluster_account
```

_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._

1. Upon installation, using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file.
_Note: Using command line or environment variables to set the cluster user only works on install._

```
harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
```

1. Upon installation, using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file.

```
CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/enabling-clustering.md b/site/versioned_docs/version-4.3/developers/clustering/enabling-clustering.md
new file mode 100644
index 00000000..6b563b19
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/enabling-clustering.md
@@ -0,0 +1,49 @@
---
title: Enabling Clustering
---

# Enabling Clustering

Clustering does not run by default; it needs to be enabled.

To enable clustering, the `clustering.enabled` configuration element in the `harperdb-config.yaml` file must be set to `true`.

There are multiple ways to update this element:

1. Directly editing the `harperdb-config.yaml` file and setting enabled to `true`:

```yaml
clustering:
  enabled: true
```

_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._

1. Calling `set_configuration` through the operations API:

```json
{
	"operation": "set_configuration",
	"clustering_enabled": true
}
```

_Note: When making any changes to HarperDB configuration, HarperDB must be restarted for the changes to take effect._

1. Using **command line variables**:

```
harperdb --CLUSTERING_ENABLED true
```

1. Using **environment variables**:

```
CLUSTERING_ENABLED=true
```

An efficient way to **install HarperDB**, **create the cluster user**, **set the node name** and **enable clustering** in one operation is to combine the steps using command line and/or environment variables. Here is an example using command line variables:

```
harperdb install --CLUSTERING_ENABLED true --CLUSTERING_NODENAME Node1 --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/establishing-routes.md b/site/versioned_docs/version-4.3/developers/clustering/establishing-routes.md
new file mode 100644
index 00000000..b733aaed
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/establishing-routes.md
@@ -0,0 +1,73 @@
---
title: Establishing Routes
---

# Establishing Routes

A route is a connection between two nodes. It is how the clustering network is established.

Routes do not need to cross-connect all nodes in the cluster. You can select a leader node (or a few leaders) that all other nodes connect to, you can chain nodes together, and so on. As long as there is one route connecting a node to the cluster, all other nodes should be able to reach that node.

Using routes, the clustering servers will create a mesh network between nodes. This mesh network ensures that if a node drops out, all other nodes can still communicate with each other. That being said, we recommend designing your routing with failover in mind; this means not storing all your routes on one node but dispersing them throughout the network.

A simple route example is a two-node topology: if Node1 adds a route to connect it to Node2, Node2 does not need to add a route to Node1. That one route configuration is all that’s needed to establish a bidirectional connection between the nodes.
A route consists of a `port` and a `host`.

`port` - the clustering port of the remote instance you are creating the connection with. This is going to be the `clustering.hubServer.cluster.network.port` in the HarperDB configuration on the node you are connecting with.

`host` - the host of the remote instance you are creating the connection with. This can be an IP address or a URL.

Routes are set in the `harperdb-config.yaml` file using the `clustering.hubServer.cluster.network.routes` element, which expects an object array, where each object has two properties, `port` and `host`.

```yaml
clustering:
  hubServer:
    cluster:
      network:
        routes:
          - host: 3.62.184.22
            port: 9932
          - host: 3.735.184.8
            port: 9932
```

![figure 1](/img/v4.3/clustering/figure1.png)

This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes.

There are multiple ways to set routes:

1. Directly editing the `harperdb-config.yaml` file (refer to the code snippet above).
1. Calling `cluster_set_routes` through the API:

```json
{
	"operation": "cluster_set_routes",
	"server": "hub",
	"routes": [ {"host": "3.735.184.8", "port": 9932} ]
}
```

_Note: When making any changes to HarperDB configuration, HarperDB must be restarted for the changes to take effect._

1. From the command line:

```bash
--CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]"
```

1. Using environment variables:

```bash
CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}]
```

The API also has `cluster_get_routes` for getting all routes in the config and `cluster_delete_routes` for deleting routes.

```json
{
	"operation": "cluster_delete_routes",
	"routes": [ {"host": "3.735.184.8", "port": 9932} ]
}
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/index.md b/site/versioned_docs/version-4.3/developers/clustering/index.md
new file mode 100644
index 00000000..f5949afd
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/index.md
@@ -0,0 +1,31 @@
---
title: Clustering
---

# Clustering

HarperDB clustering is the process of connecting multiple HarperDB databases together to create a database mesh network that enables users to define data replication patterns.

HarperDB’s clustering engine replicates data between instances of HarperDB using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster, following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model, based on the timestamp recorded on the transaction and the timestamp on the existing record on the receiving node.

***

### Common Use Case

A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold:

* The edge application should not be making outbound HTTP requests for security purposes.
* There may not be a reliable network connection.
* Not all sensor data will be sent to the cloud, whether because of the unreliable network connection or simply because storing it all in the cloud is impractical.
* The edge node should be inaccessible from outside the firewall.
* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings.

HarperDB simplifies the architecture of such an application with its bi-directional, table-level replication:

* The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds.
* The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so.
* When a threshold violation occurs, the application adds a record to the “alerts” table.
* The application appends to that record an array of “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation.
* The edge instance publishes the “alerts” table up to the cloud instance.

By letting HarperDB focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to HarperDB, you reduce the overall exposure of your application to outside forces.
diff --git a/site/versioned_docs/version-4.3/developers/clustering/managing-subscriptions.md b/site/versioned_docs/version-4.3/developers/clustering/managing-subscriptions.md
new file mode 100644
index 00000000..a8a4b407
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/managing-subscriptions.md
@@ -0,0 +1,168 @@
---
title: Managing subscriptions
---

# Managing subscriptions

Subscriptions can be added, updated, or removed through the API.

_Note: The databases and tables in the subscription must exist on either the local or the remote node. Any databases or tables that do not exist on one particular node (for example, the local node) will be automatically created on that node._

To add a single node and create one or more subscriptions, use `set_node_replication`.

```json
{
	"operation": "set_node_replication",
	"node_name": "Node2",
	"subscriptions": [
		{
			"database": "data",
			"table": "dog",
			"publish": false,
			"subscribe": true
		},
		{
			"database": "data",
			"table": "chicken",
			"publish": true,
			"subscribe": true
		}
	]
}
```

This is an example of adding Node2 to your local node. Subscriptions are created for two tables, dog and chicken.

To update one or more subscriptions with a single node, you can also use `set_node_replication`; however, this behaves as a PATCH/upsert, where only the subscription(s) being changed will be inserted/updated while the others are left untouched.

```json
{
	"operation": "set_node_replication",
	"node_name": "Node2",
	"subscriptions": [
		{
			"schema": "dev",
			"table": "dog",
			"publish": true,
			"subscribe": true
		}
	]
}
```

This call will update the subscription with the dog table. Any other subscriptions with Node2 will not change.

To add or update subscriptions with one or more nodes in one API call, use `configure_cluster`.
```json
{
	"operation": "configure_cluster",
	"connections": [
		{
			"node_name": "Node2",
			"subscriptions": [
				{
					"database": "dev",
					"table": "chicken",
					"publish": false,
					"subscribe": true
				},
				{
					"database": "prod",
					"table": "dog",
					"publish": true,
					"subscribe": true
				}
			]
		},
		{
			"node_name": "Node3",
			"subscriptions": [
				{
					"database": "dev",
					"table": "chicken",
					"publish": true,
					"subscribe": false
				}
			]
		}
	]
}
```

_Note: `configure_cluster` will override **any and all** existing subscriptions defined on the local node. This means that before going through the connections in the request and adding the subscriptions, it will first go through **all existing subscriptions the local node has** and remove them. To get all existing subscriptions, use `cluster_status`._

#### Start time

There is an optional property called `start_time` that can be passed in the subscription. This property accepts an ISO-formatted UTC date.

`start_time` can be used to set the time from which you would like to source transactions from a table when creating or updating a subscription.

```json
{
	"operation": "set_node_replication",
	"node_name": "Node2",
	"subscriptions": [
		{
			"database": "dev",
			"table": "dog",
			"publish": false,
			"subscribe": true,
			"start_time": "2022-09-02T20:06:35.993Z"
		}
	]
}
```

This example will get all transactions on Node2’s dog table starting from `2022-09-02T20:06:35.993Z` and replicate them locally on the dog table.

If no start time is passed, it defaults to the current time.

_Note: Start time utilizes clustering to back-source transactions. For this reason it can only source transactions that occurred while clustering was enabled._

#### Remove node

To remove a node and all its subscriptions, use `remove_node`.

```json
{
	"operation": "remove_node",
	"node_name": "Node2"
}
```

#### Cluster status

To get the status of all connected nodes and see their subscriptions, use `cluster_status`.

```json
{
	"node_name": "Node1",
	"is_enabled": true,
	"connections": [
		{
			"node_name": "Node2",
			"status": "open",
			"ports": {
				"clustering": 9932,
				"operations_api": 9925
			},
			"latency_ms": 65,
			"uptime": "11m 19s",
			"subscriptions": [
				{
					"schema": "dev",
					"table": "dog",
					"publish": true,
					"subscribe": true
				}
			],
			"system_info": {
				"hdb_version": "4.0.0",
				"node_version": "16.17.1",
				"platform": "linux"
			}
		}
	]
}
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/naming-a-node.md b/site/versioned_docs/version-4.3/developers/clustering/naming-a-node.md
new file mode 100644
index 00000000..d1ebdfb1
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/naming-a-node.md
@@ -0,0 +1,45 @@
---
title: Naming a Node
---

# Naming a Node

The node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster.

The name cannot contain any of the following: `.` (dot), `,` (comma), `*` (asterisk), `>` (greater than), or whitespace.

The name is set in the `harperdb-config.yaml` file using the `clustering.nodeName` configuration element.

_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._

There are multiple ways to update this element:

1. Directly editing the `harperdb-config.yaml` file:
```yaml
clustering:
  nodeName: Node1
```

_Note: When making any changes to the `harperdb-config.yaml` file, HarperDB must be restarted for the changes to take effect._

1. Calling `set_configuration` through the operations API:

```json
{
	"operation": "set_configuration",
	"clustering_nodeName": "Node1"
}
```

1. Using command line variables:

```
harperdb --CLUSTERING_NODENAME Node1
```

1. Using environment variables:

```
CLUSTERING_NODENAME=Node1
```
diff --git a/site/versioned_docs/version-4.3/developers/clustering/requirements-and-definitions.md b/site/versioned_docs/version-4.3/developers/clustering/requirements-and-definitions.md
new file mode 100644
index 00000000..1e2dd6af
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/requirements-and-definitions.md
@@ -0,0 +1,11 @@
---
title: Requirements and Definitions
---

# Requirements and Definitions

To create a cluster you must have two or more nodes\* (aka instances) of HarperDB running.

\*_A node is a single instance/installation of HarperDB. A node of HarperDB can operate independently with clustering on or off._

On the following pages we'll walk you through the steps required, in order, to set up a HarperDB cluster.
diff --git a/site/versioned_docs/version-4.3/developers/clustering/subscription-overview.md b/site/versioned_docs/version-4.3/developers/clustering/subscription-overview.md
new file mode 100644
index 00000000..6ceac7fe
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/subscription-overview.md
@@ -0,0 +1,45 @@
---
title: Subscription Overview
---

# Subscription Overview

A subscription defines how data should move between two nodes. Subscriptions are exclusively table-level and operate independently. They connect a table on one node to a table on another node; the subscription will apply to the matching database name and table name on both nodes.

_Note: ‘local’ and ‘remote’ will often be referred to. In the context of these docs, ‘local’ is the node that is receiving the API request to create/update a subscription, and ‘remote’ is the other node referred to in the request, the node on the other end of the subscription._

A subscription consists of:

`database` - the name of the database that the table you are creating the subscription for belongs to. *Note: this was previously referred to as schema and may occasionally still be referenced that way.*

`table` - the name of the table the subscription will apply to.

`publish` - a boolean which determines if transactions on the local table should be replicated on the remote table.

`subscribe` - a boolean which determines if transactions on the remote table should be replicated on the local table.

#### Publish subscription

![figure 2](/img/v4.3/clustering/figure2.png)

This diagram is an example of a `publish` subscription from the perspective of Node1.

The record with id 2 has been inserted in the dog table on Node1; after that insert completes, it is sent to Node2 and inserted in the dog table there.

#### Subscribe subscription

![figure 3](/img/v4.3/clustering/figure3.png)

This diagram is an example of a `subscribe` subscription from the perspective of Node1.

The record with id 3 has been inserted in the dog table on Node2; after that insert completes, it is sent to Node1 and inserted there.
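For reference, the subscribe-only case above corresponds to a payload like this (a sketch reusing the `set_node_replication` operation from the managing-subscriptions page; node and table names are illustrative):

```json
{
	"operation": "set_node_replication",
	"node_name": "Node2",
	"subscriptions": [
		{
			"database": "data",
			"table": "dog",
			"publish": false,
			"subscribe": true
		}
	]
}
```
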
#### Subscribe and Publish

![figure 4](/img/v4.3/clustering/figure4.png)

This diagram shows both subscribe and publish, with publish set to false. You can see that, because subscribe is true, the insert on Node2 is replicated on Node1, but because publish is set to false, the insert on Node1 is _**not**_ replicated on Node2.

![figure 5](/img/v4.3/clustering/figure5.png)

This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1.
diff --git a/site/versioned_docs/version-4.3/developers/clustering/things-worth-knowing.md b/site/versioned_docs/version-4.3/developers/clustering/things-worth-knowing.md
new file mode 100644
index 00000000..c57a47ca
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/clustering/things-worth-knowing.md
@@ -0,0 +1,43 @@
---
title: Things Worth Knowing
---

# Things Worth Knowing

Additional information that will help you define your clustering topology.

***

### Transactions

Transactions that are replicated across the cluster are:

* Insert
* Update
* Upsert
* Delete
* Bulk loads
	* CSV data load
	* CSV file load
	* CSV URL load
	* Import from S3

When adding or updating a node, any databases and tables in the subscription that don’t exist on the remote node will be automatically created.

**Destructive database operations do not replicate across a cluster**. Those operations include `drop_database`, `drop_table`, and `drop_attribute`. If the desired outcome is to drop database information from any nodes, then the operation(s) will need to be run on each node independently.

Users and roles are not replicated across the cluster.

***

### Queueing

HarperDB has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catch-up routine is executed to ensure that data that was missed, specific to the subscription, is sent/received as defined.

***

### Topologies

HarperDB clustering creates a mesh network between nodes, giving end users the ability to create an infinite number of topologies. Subscription topologies can be as simple or as complex as needed.

![](/img/v4.3/clustering/figure6.png)
diff --git a/site/versioned_docs/version-4.3/developers/components/drivers.md b/site/versioned_docs/version-4.3/developers/components/drivers.md
new file mode 100644
index 00000000..0f1c063e
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/components/drivers.md
@@ -0,0 +1,12 @@
---
title: Drivers
description: >-
  Industry standard tools to connect real-time HarperDB data with BI, analytics,
  reporting and data visualization technologies.
---

# Drivers

| Driver | Docs | Download |
| --- | --- | --- |
| Power BI | PowerBI Docs | Windows |
| Tableau | Tableau Docs | Windows, Mac, Driver JAR |
| Excel | Excel Docs | Windows |
| JDBC | JDBC Docs | Windows, Mac, Driver JAR |
| ODBC | ODBC Docs | Windows, Mac, Linux (RPM), Linux (DEB) |
| ADO | ADO Docs | Windows |
| Cmdlets | Cmdlets Docs | Windows |
| SSIS | SSIS Docs | Windows |

diff --git a/site/versioned_docs/version-4.3/developers/components/google-data-studio.md b/site/versioned_docs/version-4.3/developers/components/google-data-studio.md new file mode 100644 index 00000000..e33fb2bd --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/components/google-data-studio.md @@ -0,0 +1,37 @@
---
title: Google Data Studio
---

# Google Data Studio

[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The HarperDB Google Data Studio connector seamlessly integrates your HarperDB data with Google Data Studio so you can build custom, real-time data visualizations.

The HarperDB Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/).

## Requirements

The HarperDB database must be accessible through the Internet in order for Google Data Studio servers to access it. The database may be hosted by you or via [HarperDB Cloud](../../deployments/harperdb-cloud/).

## Get Started

Get started by selecting the HarperDB connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create).

1. Log in to https://datastudio.google.com/.
2. Add a new Data Source using the HarperDB connector. The current release version can be added as a data source by following this link: [HarperDB Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60\_VJeO3BhHJPvdd20-S76g).
3. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database).
4. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word "Basic" at the start of it. (One way to generate this key is sketched after this list.)
5. Check the box for "Secure Connections Only" if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer.
6. Check the box for "Allow Bad Certs" if your HarperDB instance does not have a valid SSL certificate. [HarperDB Cloud](../../deployments/harperdb-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this if you are using self-signed certs. If you are using [HarperDB Cloud](../../deployments/harperdb-cloud/) or another instance you know should always have valid SSL certificates, do not check this box.
7. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button.
   * Table will ask you for a Schema and a Table to return all fields of using `SELECT *`.
   * SQL will ask you for the SQL query you're using to retrieve fields from the database. You may `JOIN` multiple tables together, and use HarperDB-specific SQL functions, along with the usual power SQL grants.
8. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue.
9. If there are no errors, you now have a data source you can use in your reports!
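The Basic Auth key mentioned in step 4 is simply the base64 encoding of `username:password`. As a minimal sketch (the credentials here are placeholders), you can generate it with Node.js:

```javascript
// Build the Basic Auth token for the connector: base64("username:password").
// Paste the resulting string into the connector without the "Basic " prefix.
const token = Buffer.from('gds_reader:SuperSecretPassword').toString('base64');
console.log(token);
```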
You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as create new fields from the report view that do calculations on other fields.

## Considerations

* Both Postman and the [HarperDB Studio](../../administration/harperdb-studio/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector's user.
  * You may sign out of your current user by going to the instances tab in HarperDB Studio, then clicking on the lock icon at the top-right of a given instance's box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab.
* It's highly recommended that you create a read-only user role in HarperDB Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it.
* The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report's data set.
* _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake\_case gets around this.
diff --git a/site/versioned_docs/version-4.3/developers/components/index.md b/site/versioned_docs/version-4.3/developers/components/index.md new file mode 100644 index 00000000..4901c49f --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/components/index.md @@ -0,0 +1,38 @@
---
title: Components
---

# Components

HarperDB is a highly extensible database application platform with support for a rich variety of composable modular components and extensions that can be used and combined to build applications and add functionality to existing applications. HarperDB tools, components, and add-ons can be found in a few places:

* [SDK libraries](./sdks) are available for connecting to HarperDB from different languages.
* [Drivers](./drivers) are available for connecting to HarperDB from different products and tools.
* [HarperDB-Add-Ons repositories](https://github.com/orgs/HarperDB-Add-Ons/repositories) list various templates and add-ons for HarperDB.
* [HarperDB repositories](https://github.com/orgs/HarperDB/repositories) include additional tools for HarperDB.
* You can also [search github.com for an ever-growing list of projects that use, or work with, HarperDB](https://github.com/search?q=harperdb\&type=repositories).
* [Google Data Studio](./google-data-studio) is a visualization tool for building charts and tables from HarperDB data.

## Components

There are four general categories of components for HarperDB. The most common is applications. Applications are simply components that deliver complete functionality through an external interface they define, and are usually composed of other components. See [our guide to building applications](../applications/) to get started.

A data source component can implement the Resource API to customize access to a table or provide access to an external data source. External data source components are used to retrieve and access data from other sources (a sketch follows below).
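As a rough sketch of an external data source (the class name, endpoint, and `getId()` usage are illustrative; consult the Resource API reference for the actual contract):

```javascript
// A hypothetical external data source: implements the Resource API's get()
// so a caching table or endpoint can be backed by a third-party REST service.
// Resource is assumed to be available as a HarperDB global here.
export class ThirdPartyAPI extends Resource {
	async get() {
		// fetch the record for this resource's id from the external service
		const response = await fetch(`https://api.example.com/records/${this.getId()}`);
		return response.json();
	}
}
```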
The next two are considered extension components. Server protocol extension components provide and define ways for clients to access data and can be used to extend or create new protocols.

Server resource components implement support for different types of files that can be used as resources in applications. HarperDB includes support for using JavaScript modules and GraphQL Schemas as resources, but resource components may add support for different file types like HTML templates (like JSX), CSV data, and more.

## Server components

Server components can easily be added and configured by adding an entry to your harperdb-config.yaml:

```yaml
my-server-component:
  package: 'HarperDB-Add-Ons/package-name' # this can be any valid github or npm reference
  port: 4321
```

## Writing Extension Components

You can write your own extensions to build new functionality on HarperDB. See the [writing extension components documentation](./writing-extensions) for more information.
diff --git a/site/versioned_docs/version-4.3/developers/components/installing.md b/site/versioned_docs/version-4.3/developers/components/installing.md new file mode 100644 index 00000000..aac137ea --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/components/installing.md @@ -0,0 +1,79 @@
---
title: Installing
---

# Installing

Components can easily be added by adding a new top level element to your `harperdb-config.yaml` file.

The configuration comprises two values:

* component name - can be anything, as long as it follows valid YAML syntax.
* package - a reference to your component.

```yaml
myComponentName:
  package: HarperDB-Add-Ons/package
```

Under the hood, HarperDB calls `npm install` on all components, which means the package value can be any valid npm reference such as a GitHub repo, an NPM package, a tarball, a local directory, or a website.

```yaml
myGithubComponent:
  package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub
myNPMComponent:
  package: harperdb # install from NPM
myTarBall:
  package: /Users/harper/cool-component.tar # install from tarball
myLocal:
  package: /Users/harper/local # install from local path
myWebsite:
  package: https://harperdb-component # install from URL
```

When HarperDB is run or restarted, it checks to see if there are any new or updated components. If there are, it will dynamically create a package.json file in the `rootPath` directory and call `npm install`.

NPM will install all the components in `node_modules`.

The package.json file that is created will look something like this:

```json
{
  "dependencies": {
    "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0",
    "myNPMComponent": "npm:harperdb",
    "myTarBall": "file:/Users/harper/cool-component.tar",
    "myLocal": "file:/Users/harper/local",
    "myWebsite": "https://harperdb-component"
  }
}
```

The package prefix is automatically added; however, you can manually set it in your package reference.

```yaml
myCoolComponent:
  package: file:/Users/harper/cool-component.tar
```

## Installing components using the operations API

To add a component using the operations API, use the `deploy_component` operation.

```json
{
  "operation": "deploy_component",
  "project": "my-cool-component",
  "package": "HarperDB-Add-Ons/package/mycc"
}
```

Another option is to pass `deploy_component` a base64-encoded string representation of your component as a `.tar` file. HarperDB can generate this via the `package_component` operation, as shown below.
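A sketch of that packaging step (the project name here is illustrative); the response should contain the base64 `payload` string that `deploy_component` accepts:

```json
{
  "operation": "package_component",
  "project": "my-cool-component"
}
```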
When deploying with a payload, your component will be deployed to your `/components` directory. Any components in this directory will be automatically picked up by HarperDB.

```json
{
  "operation": "deploy_component",
  "project": "my-cool-component",
  "payload": "NzY1IAAwMDAwMjQgADAwMDAwMDAwMDAwIDE0NDIwMDQ3...."
}
```
diff --git a/site/versioned_docs/version-4.3/developers/components/operations.md b/site/versioned_docs/version-4.3/developers/components/operations.md new file mode 100644 index 00000000..fc5d2bf9 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/components/operations.md @@ -0,0 +1,37 @@
---
title: Operations
---

# Operations

One way to manage applications and components is through [HarperDB Studio](../../administration/harperdb-studio/). It performs all the necessary operations automatically. To get started, navigate to your instance in HarperDB Studio and click the subnav link for "applications". Once configuration is complete, you can manage and deploy applications in minutes.

HarperDB Studio manages your applications using nine HarperDB operations. You may view these operations within our [API Docs](../operations-api/). A brief overview of each operation is below (a sample call follows the list):

* **components\_status**

  Returns the state of the applications server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine.
* **get\_components**

  Returns an array of projects within the applications root project directory.
* **get\_component\_file**

  Returns the content of the specified file as text. HarperDB Studio uses this call to render the file content in its built-in code editor.
* **set\_component\_file**

  Updates the content of the specified file. HarperDB Studio uses this call to save any changes made through its built-in code editor.
* **drop\_component\_file**

  Deletes the specified file.
* **add\_component\_project**

  Creates a new project folder in the applications root project directory. It also inserts into the new directory the contents of our applications project template, which is publicly available here: https://github.com/HarperDB/harperdb-custom-functions-template.
* **drop\_component\_project**

  Deletes the specified project folder and all of its contents.
* **package\_component\_project**

  Creates a .tar file of the specified project folder, then reads it into a base64-encoded string and returns that string to the user.
* **deploy\_component\_project**

  Takes the output of package\_component\_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the applications root project directory.
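Each of these is invoked like any other operations API call. For example, a minimal status check based on the description above (no additional parameters should be required):

```json
{
  "operation": "components_status"
}
```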
diff --git a/site/versioned_docs/version-4.3/developers/components/sdks.md b/site/versioned_docs/version-4.3/developers/components/sdks.md new file mode 100644 index 00000000..9064851e --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/components/sdks.md @@ -0,0 +1,21 @@
---
title: SDKs
description: >-
  Software Development Kits available for connecting to HarperDB from different
  languages.
---

# SDKs

| SDK/Tool | Description | Installation |
| -------- | ----------- | ------------ |
| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A .NET Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` |
| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A JavaScript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` |
| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use HarperDB as the data source for a Gatsby project at build time | `npm i -s gatsby-source-harperdb` |
| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The HarperDB EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` |
| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of HarperDB API functions with wrappers for an object-oriented interface | `pip3 install harperdb` |
| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A HarperDB SDK for Flutter | `flutter pub add harperdb` |
| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` |
| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to HarperDB using the Node-Red platform | `npm i -s node-red-contrib-harperdb` |
| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A HarperDB SDK for NodeJS | `npm i -s harperive` |
| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A HarperDB SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` |
diff --git a/site/versioned_docs/version-4.3/developers/components/writing-extensions.md b/site/versioned_docs/version-4.3/developers/components/writing-extensions.md new file mode 100644 index 00000000..51ba8de7 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/components/writing-extensions.md @@ -0,0 +1,171 @@
---
title: Writing Extensions
---

# Writing Extensions

HarperDB is a highly extensible database application platform with support for a rich variety of composable modular components and extensions that can be used and combined to build applications and add functionality to existing applications. Here we describe the different types of components/extensions that can be developed for HarperDB and how to create them.

There are three general categories of components for HarperDB:

* **protocol extensions** that provide and define ways for clients to access data
* **resource extensions** that handle and interpret different types of files
* **consumer data sources** that provide a way to access and retrieve data from other sources.

Server protocol extensions can be used to implement new protocols like MQTT, AMQP, Kafka, or maybe a retro-style Gopher interface. They can also be used to augment existing protocols like HTTP with "middleware" that can add authentication, analytics, or additional content negotiation, or to layer protocols on top of WebSockets.

Server resource extensions implement support for different types of files that can be used as resources in applications.
HarperDB includes support for using JavaScript modules and GraphQL Schemas as resources, but resource extensions could be added to support different file types like HTML templates (like JSX), CSV data, and more.

Consumer data source components are used to retrieve and access data from other sources, and can be very useful if you want to use HarperDB to cache or use data from other databases like MySQL, Postgres, or Oracle, or subscribe to data from messaging brokers (again, possibly Kafka, NATS, etc.).

These are not mutually exclusive; you may build components that fulfill any or all of these roles.

## Server Extensions

Server Extensions are implemented as JavaScript packages/modules and interact with HarperDB through a number of possible hooks. A component can be defined as an extension by specifying the `extensionModule` in its config.yaml:

```yaml
extensionModule: './entry-module-name.js'
```

### Module Initialization

Once a user has configured an extension, HarperDB will attempt to load the extension package specified by the `package` property. Once loaded, there are several functions that the extension module can export which will be called by HarperDB:

`export function start(options: { port: number, server: {}})` If defined, this will be called on the initialization of the extension. The provided `server` property object includes a set of additional entry points for utilizing or layering on top of other protocols (and when implementing a new protocol, you can add your own entry points). The most common entry point is to provide an HTTP middleware layer. This looks like:

```javascript
export function start(options: { port: number, server: {}}) {
	options.server.http(async (request, nextLayer) => {
		// we can directly return a response here, or do some processing on the request and delegate to the next layer
		let response = await nextLayer(request);
		return response;
	});
}
```

Here, the `request` object will have the following structure (this is based on Node's request, but augmented to conform to a subset of the [WHATWG Request API](https://developer.mozilla.org/en-US/docs/Web/API/Request)):

```typescript
interface Request {
	method: string
	headers: Headers // use request.headers.get(headerName) to get header values
	body: Stream
	data: any // deserialized data from the request body
}
```

The returned `response` object should have the following structure (again, following a structural subset of the [WHATWG Response API](https://developer.mozilla.org/en-US/docs/Web/API/Response)):

```typescript
interface Response {
	status?: number
	headers?: {} // an object with header name/values
	data?: any // object/value that will be serialized into the body
	body?: Stream
}
```

The `server.http` function also accepts an options argument that supports a `runFirst` flag to indicate that the middleware should go at the top of the stack and be executed prior to other HTTP components.
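For instance, a minimal sketch of registering a middleware with `runFirst` (the response header added here is made up for illustration):

```javascript
export function start({ server }) {
	server.http(
		async (request, nextLayer) => {
			// this layer sits at the top of the stack, so it sees the request
			// before, and the response after, the other HTTP components
			const response = await nextLayer(request);
			response.headers = { ...response.headers, 'x-served-by': 'my-extension' };
			return response;
		},
		{ runFirst: true }
	);
}
```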
If you were implementing an authentication extension, you could get authentication information from the request and use it to add the `user` property to the request:

```javascript
export function start(options: { port: number, server: {}, resources: Map }) {
	options.server.http((request, nextLayer) => {
		let authorization = request.headers.authorization;
		if (authorization) {
			// get some token for the user and determine the user,
			// if we want to use harperdb's user database
			let user = options.server.getUser(username, password);
			request.user = user; // the authenticated user object goes on the request
		}
		// continue on to the next layer
		return nextLayer(request);
	}, { runFirst: true });
	// if you needed to add a login resource, you could add it as well:
	options.resources.set('/login', LoginResource);
}
```

#### Direct Socket Server
If you were implementing a new protocol, you can directly interact with the sockets and listen for new incoming TCP connections:

```javascript
export function start(options: { port: number, server: {}}) {
	options.server.socket((socket) => {
		// called for each incoming socket
	});
}
```
#### WebSockets
If you were implementing a protocol using WebSockets, you can define a listener for incoming WebSocket connections and indicate the WebSockets (sub)protocol to specifically handle (which will select your listener if the `Sec-WebSocket-Protocol` header matches your protocol):

```javascript
export function start(options) {
	options.server.ws((socket) => {
		// called for each incoming WebSocket
	}, Object.assign({ subProtocol: 'my-cool-protocol' }, options));
}
```

### Resource Handling

Typically, servers not only communicate with clients, but serve up meaningful data based on the resources within the server. While resource extensions typically handle defining resources, once resources are defined, they can be consumed by server extensions. The `resources` argument provides access to the set of all the resources that have been defined. A server can call `resources.getMatch(path)` to get the resource associated with the URL path.

## Resource Extensions

Resource extensions allow us to handle different files and make them accessible to servers as resources, following the common [Resource API](../../technical-details/reference/resource). To implement a resource extension, you export a function called `handleFile`. Users can then configure which files should be handled by your extension. For example, if we had implemented an EJS handler, it could be configured as:

```yaml
module: 'ejs-extension'
path: '/templates/*.ejs'
```

And in our extension module, we could implement `handleFile` (a fuller sketch follows at the end of this section):

```javascript
export function handleFile(contents, relative_path, file_path, resources) {
	// will be called for each .ejs file.
	// We can then add the generated resource:
	resources.set(relative_path, GeneratedResource);
}
```

We can also implement a handler for directories. This can be useful for implementing a handler for broader frameworks that load their own files, like Next.js or Remix, or a static file handler. HarperDB includes such an extension for fastify's auto-loader that loads a directory of route definitions. This hook looks like:

```javascript
export function handleDirectory(relative_path, path, resources) {
}
```

Note that these hooks are not mutually exclusive. You can write an extension that implements any or all of these hooks, potentially implementing both a custom protocol and file handling.
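As promised above, a fuller sketch of the EJS handler (this assumes the `ejs` npm package, that `Resource` is available as a HarperDB global, and that `resources.set` accepts a Resource-like class; treat the exact contract as illustrative and consult the Resource API reference):

```javascript
import * as ejs from 'ejs';

export function handleFile(contents, relative_path, file_path, resources) {
	// compile each .ejs template once, at load time
	const template = ejs.compile(contents.toString());
	// expose a resource that renders the compiled template on each GET
	resources.set(relative_path, class extends Resource {
		async get() {
			return template({ /* model data for the template */ });
		}
	});
}
```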
## Data Source Components

Data source components implement the `Resource` interface to provide access to various data sources, which may be other APIs, databases, or local storage. Components that implement this interface can then be used as a source for caching tables, can be accessed as part of endpoint implementations, or even used as endpoints themselves. See the [Resource documentation](../../technical-details/reference/resource) for more information on implementing new resources.

## Content Type Extensions

HarperDB uses content negotiation to determine how to deserialize incoming data from HTTP requests (and any other protocols that support content negotiation) and how to serialize data into responses. This negotiation is performed by comparing the `Content-Type` header with the registered content type handlers to determine how to deserialize content into structured data that is processed and stored, and comparing the `Accept` header with the registered content type handlers to determine how to serialize structured data. HarperDB comes with a rich set of content type handlers including JSON, CBOR, MessagePack, CSV, Event-Stream, and more. However, you can also add your own content type handlers by adding new entries (or even replacing existing entries) in the `contentTypes` map exported from the `server` global (or the `harperdb` export). This map is keyed by the MIME type, and the value is an object with the following properties (all optional):
* `serialize(data): Buffer|Uint8Array|string`: If defined, this will be called with the data structure and should return the data serialized as binary data (a Node.js Buffer or Uint8Array) or a string, for the response.
* `serializeStream(data): ReadableStream`: If defined, this will be called with the data structure and should return the data serialized as a ReadableStream. This is generally necessary for handling asynchronous iterables.
* `deserialize(Buffer|string): any`: If defined (and deserializeStream is not defined), this will be called with the raw data received from the incoming request and should return the deserialized data structure. This will be called with a string for text MIME types ("text/..."), and a Buffer for all others.
* `deserializeStream(ReadableStream): any`: If defined, this will be called with the raw data stream (if there is one) received from the incoming request and should return the deserialized data structure (potentially as an asynchronous iterable).
* `q: number`: This is an indication of the serialization quality, between 0 and 1; if omitted, it defaults to 1. It is called "content negotiation" instead of "content demanding" because both client and server may have multiple supported content types, and the server needs to choose the best for both. This is determined by finding the content type (of all supported) with the highest product of client q and server q (1 is a perfect representation of the data, 0 is worst, 0.5 is medium quality).

For example, if you wanted to define an XML serializer (that can respond with XML to requests with `Accept: text/xml`) you could write:

```javascript
contentTypes.set('text/xml', {
	serialize(data) {
		return '<xml>...</xml>'; // build the XML string from the data structure here
	},
	q: 0.8,
});
```

## Trusted/Untrusted (Future Plans)

In the future, extensions may be categorized as trusted or untrusted. For some HarperDB installations, administrators may choose to constrain users to only using trusted extensions for security reasons (such as multi-tenancy requirements or added defense in depth).
Most installations do not impose such constraints, but they may exist in some situations.

An extension can be automatically considered trusted if it conforms to the requirements of [Secure EcmaScript](https://www.npmjs.com/package/ses/v/0.7.0) (basically strict mode code that doesn't modify any global objects), and either does not use any other modules, or only uses modules from other trusted extensions/components. An extension can also be marked as trusted by review by the HarperDB team, but developers should not expect that HarperDB can review all extensions. Untrusted extensions can access any other packages/modules, and may have many additional capabilities.
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/advanced-json-sql-examples.md b/site/versioned_docs/version-4.3/developers/operations-api/advanced-json-sql-examples.md new file mode 100644 index 00000000..1584a0c4 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/advanced-json-sql-examples.md @@ -0,0 +1,1780 @@
---
title: Advanced JSON SQL Examples
---

# Advanced JSON SQL Examples

## Create movies database
Create a new database called "movies" using the `create_database` operation.

_Note: Creating a database is optional; if one is not created, HarperDB will default to using a database named `data`._

### Body
```json
{
  "operation": "create_database",
  "database": "movies"
}
```

### Response: 200
```json
{
  "message": "database 'movies' successfully created"
}
```

---

## Create movie Table
Creates a new table called "movie" inside the database "movies" using the `create_table` operation.

### Body

```json
{
  "operation": "create_table",
  "database": "movies",
  "table": "movie",
  "primary_key": "id"
}
```

### Response: 200
```json
{
  "message": "table 'movies.movie' successfully created."
}
```

---

## Create credits Table
Creates a new table called "credits" inside the database "movies" using the `create_table` operation.

### Body

```json
{
  "operation": "create_table",
  "database": "movies",
  "table": "credits",
  "primary_key": "movie_id"
}
```

### Response: 200
```json
{
  "message": "table 'movies.credits' successfully created."
}
```

---

## Bulk Insert movie Via CSV
Inserts data from a hosted CSV file into the "movie" table using the `csv_url_load` operation.

### Body

```json
{
  "operation": "csv_url_load",
  "database": "movies",
  "table": "movie",
  "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv"
}
```

### Response: 200
```json
{
  "message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c"
}
```

---

## Bulk Insert credits Via CSV
Inserts data from a hosted CSV file into the "credits" table using the `csv_url_load` operation.

### Body

```json
{
  "operation": "csv_url_load",
  "database": "movies",
  "table": "credits",
  "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv"
}
```

### Response: 200
```json
{
  "message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c",
  "job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c"
}
```

---

## View raw data
In the following examples we will be running expressions on the keywords & production_companies attributes, so for context we are displaying what the raw data looks like.
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + { + "id": 305, + "name": "moon" + }, + { + "id": 697, + "name": "loss of loved one" + }, + { + "id": 839, + "name": "planet mars" + }, + { + "id": 14626, + "name": "astronaut" + }, + { + "id": 157265, + "name": "moon colony" + }, + { + "id": 162429, + "name": "solar system" + }, + { + "id": 240119, + "name": "father son relationship" + }, + { + "id": 244256, + "name": "near future" + }, + { + "id": 257878, + "name": "planet neptune" + }, + { + "id": 260089, + "name": "space walk" + } + ], + "production_companies": [ + { + "id": 490, + "name": "New Regency Productions", + "origin_country": "" + }, + { + "id": 79963, + "name": "Keep Your Head", + "origin_country": "" + }, + { + "id": 73492, + "name": "MadRiver Pictures", + "origin_country": "" + }, + { + "id": 81, + "name": "Plan B Entertainment", + "origin_country": "US" + }, + { + "id": 30666, + "name": "RT Features", + "origin_country": "BR" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + { + "id": 3070, + "name": "mercenary" + }, + { + "id": 4110, + "name": "mumbai (bombay), india" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 9730, + "name": "crime boss" + }, + { + "id": 11107, + "name": "rescue mission" + }, + { + "id": 18712, + "name": "based on graphic novel" + }, + { + "id": 265216, + "name": "dhaka (dacca), bangladesh" + } + ], + "production_companies": [ + { + "id": 106544, + "name": "AGBO", + "origin_country": "US" + }, + { + "id": 109172, + "name": "Thematic Entertainment", + "origin_country": "US" + }, + { + "id": 92029, + "name": "TGIM Films", + "origin_country": "US" + } + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + { + "id": 10873, + "name": "school" + } + ], + "production_companies": [] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + { + "id": 2651, + "name": "nanotechnology" + }, + { + "id": 9715, + "name": "superhero" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 164218, + "name": "psychotronic" + }, + { + "id": 255024, + "name": "shared universe" + }, + { + "id": 258575, + "name": "valiant comics" + } + ], + "production_companies": [ + { + "id": 34, + "name": "Sony Pictures", + "origin_country": "US" + }, + { + "id": 10246, + "name": "Cross Creek Pictures", + "origin_country": "US" + }, + { + "id": 6573, + "name": "Mimran Schur Pictures", + "origin_country": "US" + }, + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 103673, + "name": "The Hideaway Entertainment", + "origin_country": "US" + }, + { + "id": 124335, + "name": "Valiant Entertainment", + "origin_country": "US" + }, + { + "id": 5, + "name": "Columbia Pictures", + "origin_country": "US" + }, + { + "id": 1225, + "name": "One Race", + "origin_country": "US" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + } + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + { + "id": 818, + "name": "based on novel or book" + }, + { + "id": 4542, + "name": "gold rush" + }, + { + "id": 15162, + "name": "dog" + }, + { + "id": 155821, + "name": "sled dogs" + }, + { + "id": 189390, + "name": "yukon" + }, + { + "id": 207928, + "name": "19th century" + }, + { + "id": 259987, + "name": "cgi animation" + }, + { + "id": 263806, + "name": "1890s" + } + ], + "production_companies": [ + { + "id": 787, + "name": "3 Arts Entertainment", + "origin_country": "US" + }, + { + "id": 127928, + "name": "20th Century Studios", + "origin_country": "US" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + { + "id": 282, + "name": "video game" + }, + { + "id": 6054, + "name": "friendship" + }, + { + "id": 10842, + "name": "good vs evil" + }, + { + "id": 41645, + "name": "based on video game" + }, + { + "id": 167043, + "name": "road movie" + }, + { + "id": 172142, + "name": "farting" + }, + { + "id": 188933, + "name": "bar fight" + }, + { + "id": 226967, + "name": "amistad" + }, + { + "id": 245230, + "name": "live action remake" + }, + { + "id": 258111, + "name": "fantasy" + }, + { + "id": 260223, + "name": "videojuego" + } + ], + "production_companies": [ + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 10644, + "name": "Blur Studios", + "origin_country": "US" + }, + { + "id": 77884, + "name": "Marza Animation Planet", + "origin_country": "JP" + }, + { + "id": 4, + "name": "Paramount", + "origin_country": "US" + }, + { + "id": 113750, + "name": "SEGA", + "origin_country": "JP" + }, + { + "id": 100711, + "name": "DJ2 Entertainment", + "origin_country": "" + }, + { + "id": 24955, + "name": "Paramount Animation", + "origin_country": "US" + } + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + { + "id": 849, + "name": "dc comics" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 187056, + "name": "woman director" + }, + { + "id": 229266, + "name": "dc extended universe" + } + ], + "production_companies": [ + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": 
"US" + }, + { + "id": 82968, + "name": "LuckyChap Entertainment", + "origin_country": "GB" + }, + { + "id": 103462, + "name": "Kroll & Co Entertainment", + "origin_country": "US" + }, + { + "id": 174, + "name": "Warner Bros. Pictures", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + }, + { + "id": 128064, + "name": "DC Films", + "origin_country": "US" + }, + { + "id": 101831, + "name": "Clubhouse Pictures", + "origin_country": "US" + } + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + { + "id": 849, + "name": "dc comics" + } + ], + "production_companies": [ + { + "id": 2785, + "name": "Warner Bros. Animation", + "origin_country": "US" + }, + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + } + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + { + "id": 1353, + "name": "underground" + }, + { + "id": 5318, + "name": "seoul" + }, + { + "id": 5732, + "name": "birthday party" + }, + { + "id": 5752, + "name": "private lessons" + }, + { + "id": 9866, + "name": "basement" + }, + { + "id": 10453, + "name": "con artist" + }, + { + "id": 11935, + "name": "working class" + }, + { + "id": 12565, + "name": "psychological thriller" + }, + { + "id": 13126, + "name": "limousine driver" + }, + { + "id": 14514, + "name": "class differences" + }, + { + "id": 14864, + "name": "rich poor" + }, + { + "id": 17997, + "name": "housekeeper" + }, + { + "id": 18015, + "name": "tutor" + }, + { + "id": 18035, + "name": "family" + }, + { + "id": 33421, + "name": "crime family" + }, + { + "id": 173272, + "name": "flood" + }, + { + "id": 188861, + "name": "smell" + }, + { + "id": 198673, + "name": "unemployed" + }, + { + "id": 237462, + "name": "wealthy family" + } + ], + "production_companies": [ + { + "id": 7036, + "name": "CJ Entertainment", + "origin_country": "KR" + }, + { + "id": 4399, + "name": "Barunson E&A", + "origin_country": "KR" + } + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + { + "id": 161176, + "name": "space opera" + } + ], + "production_companies": [ + { + "id": 1, + "name": "Lucasfilm", + "origin_country": "US" + }, + { + "id": 11461, + "name": "Bad Robot", + "origin_country": "US" + }, + { + "id": 2, + "name": "Walt Disney Pictures", + "origin_country": "US" + }, + { + "id": 120404, + "name": "British Film Commission", + "origin_country": "" + } + ] + } +] +``` + + +--- + +## Simple search_json call +This query uses search_json to convert the keywords object array to a simple string array. The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + "moon", + "loss of loved one", + "planet mars", + "astronaut", + "moon colony", + "solar system", + "father son relationship", + "near future", + "planet neptune", + "space walk" + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + "mercenary", + "mumbai (bombay), india", + "based on comic", + "crime boss", + "rescue mission", + "based on graphic novel", + "dhaka (dacca), bangladesh" + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + "school" + ] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + "nanotechnology", + "superhero", + "based on comic", + "psychotronic", + "shared universe", + "valiant comics" + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + "based on novel or book", + "gold rush", + "dog", + "sled dogs", + "yukon", + "19th century", + "cgi animation", + "1890s" + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + "video game", + "friendship", + "good vs evil", + "based on video game", + "road movie", + "farting", + "bar fight", + "amistad", + "live action remake", + "fantasy", + "videojuego" + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + "dc comics", + "based on comic", + "woman director", + "dc extended universe" + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + "dc comics" + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + "underground", + "seoul", + "birthday party", + "private lessons", + "basement", + "con artist", + "working class", + "psychological thriller", + "limousine driver", + "class differences", + "rich poor", + "housekeeper", + "tutor", + "family", + "crime family", + "flood", + "smell", + "unemployed", + "wealthy family" + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + "space opera" + ] + } +] +``` + + +--- + +## Use search_json in a where clause +This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, release_date FROM movies.movie where search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" +} +``` + +### Response: 200 +```json +[ + { + "title": "Iron Man", + "release_date": "2008-04-30" + }, + { + "title": "The Incredible Hulk", + "release_date": "2008-06-12" + }, + { + "title": "Iron Man 2", + "release_date": "2010-04-28" + }, + { + "title": "Thor", + "release_date": "2011-04-21" + }, + { + "title": "Captain America: The First Avenger", + "release_date": "2011-07-22" + }, + { + "title": "Marvel One-Shot: The Consultant", + "release_date": "2011-09-12" + }, + { + "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", + "release_date": "2011-10-25" + }, + { + "title": "The Avengers", + "release_date": "2012-04-25" + }, + { + "title": "Marvel One-Shot: Item 47", + "release_date": "2012-09-13" + }, + { + "title": "Iron Man 3", + "release_date": "2013-04-18" + }, + { + "title": "Marvel One-Shot: Agent Carter", + "release_date": "2013-09-08" + }, + { + "title": "Thor: The Dark World", + "release_date": "2013-10-29" + }, + { + "title": "Marvel One-Shot: All Hail the King", + "release_date": "2014-02-04" + }, + { + "title": "Marvel Studios: Assembling a Universe", + "release_date": "2014-03-18" + }, + { + "title": "Captain America: The Winter Soldier", + "release_date": "2014-03-20" + }, + { + "title": "Guardians of the Galaxy", + "release_date": "2014-07-30" + }, + { + "title": "Avengers: Age of Ultron", + "release_date": "2015-04-22" + }, + { + "title": "Ant-Man", + "release_date": "2015-07-14" + }, + { + "title": "Captain America: Civil War", + "release_date": "2016-04-27" + }, + { + "title": "Team Thor", + "release_date": "2016-08-28" + }, + { + "title": "Doctor Strange", + "release_date": "2016-10-25" + }, + { + "title": "Guardians of the Galaxy Vol. 2", + "release_date": "2017-04-19" + }, + { + "title": "Spider-Man: Homecoming", + "release_date": "2017-07-05" + }, + { + "title": "Thor: Ragnarok", + "release_date": "2017-10-25" + }, + { + "title": "Black Panther", + "release_date": "2018-02-13" + }, + { + "title": "Avengers: Infinity War", + "release_date": "2018-04-25" + }, + { + "title": "Ant-Man and the Wasp", + "release_date": "2018-07-04" + }, + { + "title": "Captain Marvel", + "release_date": "2019-03-06" + }, + { + "title": "Avengers: Endgame", + "release_date": "2019-04-24" + }, + { + "title": "Spider-Man: Far from Home", + "release_date": "2019-06-28" + }, + { + "title": "Black Widow", + "release_date": "2020-10-28" + }, + { + "title": "Untitled Spider-Man 3", + "release_date": "2021-11-04" + }, + { + "title": "Thor: Love and Thunder", + "release_date": "2022-02-10" + }, + { + "title": "Doctor Strange in the Multiverse of Madness", + "release_date": "2022-03-23" + }, + { + "title": "Untitled Marvel Project (3)", + "release_date": "2022-07-29" + }, + { + "title": "Guardians of the Galaxy Vol. 3", + "release_date": "2023-02-16" + } +] +``` + + +--- + +## Use search_json to show the movies with the largest casts +This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the results. The cast attribute holds an object array of details around the cast of a movie. We use the expression '$count(id)' that counts each id and returns the value back which we alias in SQL as cast_size which in turn gets used to sort the rows. 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "movie_title": "Around the World in Eighty Days", + "cast_size": 312 + }, + { + "movie_title": "And the Oscar Goes To...", + "cast_size": 259 + }, + { + "movie_title": "Rock of Ages", + "cast_size": 223 + }, + { + "movie_title": "Mr. Smith Goes to Washington", + "cast_size": 213 + }, + { + "movie_title": "Les Misérables", + "cast_size": 208 + }, + { + "movie_title": "Jason Bourne", + "cast_size": 201 + }, + { + "movie_title": "The Muppets", + "cast_size": 191 + }, + { + "movie_title": "You Don't Mess with the Zohan", + "cast_size": 183 + }, + { + "movie_title": "The Irishman", + "cast_size": 173 + }, + { + "movie_title": "Spider-Man: Far from Home", + "cast_size": 173 + } +] +``` + + +--- + +## search_json as a condition, in a select with a table join +This example shows how we can use SEARCH_JSON to find movies where at least of 2 our favorite actors from Marvel films have acted together then list the movie, its overview, release date, and the actors names and their characters. The WHERE clause performs a count on credits.cast attribute that have the matching actors. The SELECT performs the same filter on the cast attribute and performs a transform on each object to just return the actor's name and their character. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2" +} +``` + +### Response: 200 +```json +[ + { + "title": "Out of Sight", + "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.", + "release_date": "1998-06-26", + "characters": [ + { + "actor": "Don Cheadle", + "character": "Maurice Miller" + }, + { + "actor": "Samuel L. Jackson", + "character": "Hejira Henry (uncredited)" + } + ] + }, + { + "title": "Iron Man", + "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.", + "release_date": "2008-04-30", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Captain America: The First Avenger", + "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", + "release_date": "2011-07-22", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "In Good Company", + "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", + "release_date": "2004-12-29", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Alex Foreman" + }, + { + "actor": "Clark Gregg", + "character": "Mark Steckle" + } + ] + }, + { + "title": "Zodiac", + "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", + "release_date": "2007-03-02", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Dave Toschi" + }, + { + "actor": "Robert Downey Jr.", + "character": "Paul Avery" + } + ] + }, + { + "title": "Hard Eight", + "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", + "release_date": "1996-02-28", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Clementine" + }, + { + "actor": "Samuel L. Jackson", + "character": "Jimmy" + } + ] + }, + { + "title": "The Spirit", + "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", + "release_date": "2008-12-25", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Silken Floss" + }, + { + "actor": "Samuel L. Jackson", + "character": "Octopuss" + } + ] + }, + { + "title": "S.W.A.T.", + "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", + "release_date": "2003-08-08", + "characters": [ + { + "actor": "Samuel L. Jackson", + "character": "Sgt. 
Dan 'Hondo' Harrelson" + }, + { + "actor": "Jeremy Renner", + "character": "Brian Gamble" + } + ] + }, + { + "title": "Iron Man 2", + "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", + "release_date": "2010-04-28", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Scarlett Johansson", + "character": "Natalie Rushman / Natasha Romanoff / Black Widow" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + } + ] + }, + { + "title": "Thor", + "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", + "release_date": "2011-04-21", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + } + ] + }, + { + "title": "View from the Top", + "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", + "release_date": "2003-03-21", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Donna" + }, + { + "actor": "Mark Ruffalo", + "character": "Ted Stewart" + } + ] + }, + { + "title": "The Nanny Diaries", + "overview": "A college graduate goes to work as a nanny for a rich New York family. Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", + "release_date": "2007-08-24", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Annie Braddock" + }, + { + "actor": "Chris Evans", + "character": "Hayden \"Harvard Hottie\"" + } + ] + }, + { + "title": "The Perfect Score", + "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", + "release_date": "2004-01-30", + "characters": [ + { + "actor": "Chris Evans", + "character": "Kyle" + }, + { + "actor": "Scarlett Johansson", + "character": "Francesca Curtis" + } + ] + }, + { + "title": "The Avengers", + "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. 
Spanning the globe, a daring recruitment effort begins!", + "release_date": "2012-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + } + ] + }, + { + "title": "Iron Man 3", + "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", + "release_date": "2013-04-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / Iron Patriot" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner (uncredited)" + } + ] + }, + { + "title": "Marvel One-Shot: The Consultant", + "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", + "release_date": "2011-09-12", + "characters": [ + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark (archive footage)" + } + ] + }, + { + "title": "Thor: The Dark World", + "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", + "release_date": "2013-10-29", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Loki as Captain America (uncredited)" + } + ] + }, + { + "title": "Avengers: Age of Ultron", + "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. 
As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", + "release_date": "2015-04-22", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + } + ] + }, + { + "title": "Captain America: The Winter Soldier", + "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", + "release_date": "2014-03-20", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + } + ] + }, + { + "title": "Thanks for Sharing", + "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", + "release_date": "2013-09-19", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Adam" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Phoebe" + } + ] + }, + { + "title": "Chef", + "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. 
Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", + "release_date": "2014-05-08", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Molly" + }, + { + "actor": "Robert Downey Jr.", + "character": "Marvin" + } + ] + }, + { + "title": "Marvel Studios: Assembling a Universe", + "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", + "release_date": "2014-03-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Himself / Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Himself / Thor" + }, + { + "actor": "Chris Evans", + "character": "Himself / Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Himself / Bruce Banner / Hulk" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Herself" + }, + { + "actor": "Clark Gregg", + "character": "Himself" + }, + { + "actor": "Samuel L. Jackson", + "character": "Himself" + }, + { + "actor": "Scarlett Johansson", + "character": "Herself" + }, + { + "actor": "Jeremy Renner", + "character": "Himself" + } + ] + }, + { + "title": "Captain America: Civil War", + "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", + "release_date": "2016-04-27", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + } + ] + }, + { + "title": "Thor: Ragnarok", + "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", + "release_date": "2017-10-25", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" + } + ] + }, + { + "title": "Avengers: Endgame", + "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. 
With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", + "release_date": "2019-04-24", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Don Cheadle", + "character": "James Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Pepper Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Avengers: Infinity War", + "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", + "release_date": "2018-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } + ] + }, + { + "title": "Captain Marvel", + "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", + "release_date": "2019-03-06", + "characters": [ + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Agent Phil Coulson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America (uncredited)" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (uncredited)" + }, + { + "actor": "Don Cheadle", + "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk (uncredited)" + } + ] + }, + { + "title": "Spider-Man: Homecoming", + "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", + "release_date": "2017-07-05", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + } + ] + }, + { + "title": "Team Thor", + "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", + "release_date": "2016-08-28", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner" + } + ] + }, + { + "title": "Black Widow", + "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", + "release_date": "2020-10-28", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + } + ] + } +] +``` diff --git a/site/versioned_docs/version-4.3/developers/operations-api/bulk-operations.md b/site/versioned_docs/version-4.3/developers/operations-api/bulk-operations.md new file mode 100644 index 00000000..048ec5d4 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/bulk-operations.md @@ -0,0 +1,136 @@ +--- +title: Bulk Operations +--- + +# Bulk Operations + +## CSV Data Load +Ingests CSV data, provided directly in the operation as an `insert`, `update` or `upsert` into the specified database table. + +* operation _(required)_ - must always be `csv_data_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. 
The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* data _(required)_ - csv data to import into HarperDB
+
+### Body
+```json
+{
+	"operation": "csv_data_load",
+	"database": "dev",
+	"action": "insert",
+	"table": "breed",
+	"data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69",
+	"job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69"
+}
+```
+
+---
+
+## CSV File Load
+Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table.
+
+_Note: The CSV file must reside on the same machine on which HarperDB is running. For example, the path to a CSV on your computer will produce an error if your HarperDB instance is a cloud instance._
+
+* operation _(required)_ - must always be `csv_file_load`
+* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert`
+* database _(optional)_ - name of the database where you are loading your data. The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* file_path _(required)_ - path to the csv file on the host running HarperDB
+
+### Body
+```json
+{
+	"operation": "csv_file_load",
+	"action": "insert",
+	"database": "dev",
+	"table": "breed",
+	"file_path": "/home/user/imports/breeds.csv"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e",
+	"job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e"
+}
+```
+
+---
+
+## CSV URL Load
+Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into the specified database table.
+
+* operation _(required)_ - must always be `csv_url_load`
+* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert`
+* database _(optional)_ - name of the database where you are loading your data. The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* csv_url _(required)_ - URL to the csv
+
+### Body
+```json
+{
+	"operation": "csv_url_load",
+	"action": "insert",
+	"database": "dev",
+	"table": "breed",
+	"csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a",
+	"job_id": "332aa0a2-6833-46cd-88a6-ae375920436a"
+}
+```
+
+---
+
+## Import from S3
+This operation allows users to import CSV or JSON files from an AWS S3 bucket as an `insert`, `update` or `upsert`.
+
+* operation _(required)_ - must always be `import_from_s3`
+* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert`
+* database _(optional)_ - name of the database where you are loading your data.
The default is `data`
+* table _(required)_ - name of the table where you are loading your data
+* s3 _(required)_ - object containing required AWS S3 bucket info for operation:
+	* aws_access_key_id - AWS access key for authenticating into your S3 bucket
+	* aws_secret_access_key - AWS secret for authenticating into your S3 bucket
+	* bucket - AWS S3 bucket to import from
+	* key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_
+	* region - the region of the bucket
+
+### Body
+```json
+{
+	"operation": "import_from_s3",
+	"action": "insert",
+	"database": "dev",
+	"table": "dog",
+	"s3": {
+		"aws_access_key_id": "YOUR_KEY",
+		"aws_secret_access_key": "YOUR_SECRET_KEY",
+		"bucket": "BUCKET_NAME",
+		"key": "OBJECT_NAME",
+		"region": "BUCKET_REGION"
+	}
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16",
+	"job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16"
+}
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/clustering.md b/site/versioned_docs/version-4.3/developers/operations-api/clustering.md
new file mode 100644
index 00000000..300664c4
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/operations-api/clustering.md
@@ -0,0 +1,457 @@
+---
+title: Clustering
+---
+
+# Clustering
+
+## Cluster Set Routes
+Adds a route/routes to either the hub or leaf server cluster configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_set_routes`
+* server _(required)_ - must always be `hub` or `leaf`; in most cases you should use `hub` here
+* routes _(required)_ - must always be an object array with a host and port:
+	* host - the host of the remote instance you are clustering to
+	* port - the clustering port of the remote instance you are clustering to, in most cases this is the value in `clustering.hubServer.cluster.network.port` in the remote instance's `harperdb-config.yaml`
+
+### Body
+```json
+{
+	"operation": "cluster_set_routes",
+	"server": "hub",
+	"routes": [
+		{
+			"host": "3.22.181.22",
+			"port": 12345
+		},
+		{
+			"host": "3.137.184.8",
+			"port": 12345
+		},
+		{
+			"host": "18.223.239.195",
+			"port": 12345
+		},
+		{
+			"host": "18.116.24.71",
+			"port": 12345
+		}
+	]
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "cluster routes successfully set",
+	"set": [
+		{
+			"host": "3.22.181.22",
+			"port": 12345
+		},
+		{
+			"host": "3.137.184.8",
+			"port": 12345
+		},
+		{
+			"host": "18.223.239.195",
+			"port": 12345
+		},
+		{
+			"host": "18.116.24.71",
+			"port": 12345
+		}
+	],
+	"skipped": []
+}
+```
+
+---
+
+## Cluster Get Routes
+Gets all the hub and leaf server routes from the config file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_get_routes`
+
+### Body
+```json
+{
+	"operation": "cluster_get_routes"
+}
+```
+
+### Response: 200
+```json
+{
+	"hub": [
+		{
+			"host": "3.22.181.22",
+			"port": 12345
+		},
+		{
+			"host": "3.137.184.8",
+			"port": 12345
+		},
+		{
+			"host": "18.223.239.195",
+			"port": 12345
+		},
+		{
+			"host": "18.116.24.71",
+			"port": 12345
+		}
+	],
+	"leaf": []
+}
+```
+
+---
+
+## Cluster Delete Routes
+Removes route(s) from the hub and/or leaf server routes array in the config file. Returns a deletion success message and arrays of deleted and skipped records.
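+
+For reference, like every operation this is just an HTTP POST to the operations API; a minimal `curl` sketch, assuming a local instance on the default operations port 9925 and placeholder credentials:
+
+```bash
+# Delete a single route from the hub server configuration
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "cluster_delete_routes", "routes": [{"host": "18.116.24.71", "port": 12345}]}'
+```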
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_delete_routes`
+* routes _(required)_ - must be an array of route object(s)
+
+### Body
+
+```json
+{
+	"operation": "cluster_delete_routes",
+	"routes": [
+		{
+			"host": "18.116.24.71",
+			"port": 12345
+		}
+	]
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "cluster routes successfully deleted",
+	"deleted": [
+		{
+			"host": "18.116.24.71",
+			"port": 12345
+		}
+	],
+	"skipped": []
+}
+```
+
+
+---
+
+## Add Node
+Registers an additional HarperDB instance with associated subscriptions. Learn more about [HarperDB clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_node`
+* node_name _(required)_ - the node name of the remote node
+* subscriptions _(required)_ - the relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
+	* schema - the schema to replicate from
+	* table - the table to replicate from
+	* subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+	* publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+	* start_time _(optional)_ - how far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format
+
+### Body
+```json
+{
+	"operation": "add_node",
+	"node_name": "ec2-3-22-181-22",
+	"subscriptions": [
+		{
+			"schema": "dev",
+			"table": "dog",
+			"subscribe": false,
+			"publish": true,
+			"start_time": "2022-09-02T20:06:35.993Z"
+		}
+	]
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Successfully added 'ec2-3-22-181-22' to manifest"
+}
+```
+
+---
+
+## Update Node
+Modifies an existing HarperDB instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [HarperDB clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `update_node`
+* node_name _(required)_ - the node name of the remote node you are updating
+* subscriptions _(required)_ - the relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`:
+	* schema - the schema to replicate from
+	* table - the table to replicate from
+	* subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+	* publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+	* start_time _(optional)_ - how far back to go to get transactions from the node being updated. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format
+
+### Body
+```json
+{
+	"operation": "update_node",
+	"node_name": "ec2-18-223-239-195",
+	"subscriptions": [
+		{
+			"schema": "dev",
+			"table": "dog",
+			"subscribe": true,
+			"publish": false,
+			"start_time": "2022-09-02T20:06:35.993Z"
+		}
+	]
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Successfully updated 'ec2-18-223-239-195'"
+}
+```
+
+---
+
+## Set Node Replication
+A more aptly named alias for `add_node` and `update_node`.
This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. The `database` (aka `schema`) parameter is optional; it defaults to `data`.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_node_replication`
+* node_name _(required)_ - the node name of the remote node you are updating
+* subscriptions _(required)_ - the relationship created between nodes. Must be an object array and include `table`, `subscribe` and `publish`:
+	* database _(optional)_ - the database to replicate from
+	* table _(required)_ - the table to replicate from
+	* subscribe _(required)_ - a boolean which determines if transactions on the remote table should be replicated on the local table
+	* publish _(required)_ - a boolean which determines if transactions on the local table should be replicated on the remote table
+
+### Body
+```json
+{
+	"operation": "set_node_replication",
+	"node_name": "node1",
+	"subscriptions": [
+		{
+			"table": "dog",
+			"subscribe": true,
+			"publish": true
+		}
+	]
+}
+```
+### Response: 200
+```json
+{
+	"message": "Successfully updated 'node1'"
+}
+```
+
+---
+
+## Cluster Status
+Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [HarperDB clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_status`
+
+### Body
+```json
+{
+	"operation": "cluster_status"
+}
+```
+
+### Response: 200
+```json
+{
+	"node_name": "ec2-18-221-143-69",
+	"is_enabled": true,
+	"connections": [
+		{
+			"node_name": "ec2-3-22-181-22",
+			"status": "open",
+			"ports": {
+				"clustering": 12345,
+				"operations_api": 9925
+			},
+			"latency_ms": 13,
+			"uptime": "30d 1h 18m 8s",
+			"subscriptions": [
+				{
+					"schema": "dev",
+					"table": "dog",
+					"publish": true,
+					"subscribe": true
+				}
+			]
+		}
+	]
+}
+```
+
+
+---
+
+## Cluster Network
+Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with and the routes set in its config file. Learn more about [HarperDB clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `cluster_network`
+* timeout _(optional)_ - the amount of time in milliseconds to wait for a response from the network. Must be a number
+* connected_nodes _(optional)_ - if `true`, omits `connected_nodes` from the response. Must be a boolean. Defaults to `false`
+* routes _(optional)_ - if `true`, omits `routes` from the response. Must be a boolean. Defaults to `false`
+
+### Body
+
+```json
+{
+	"operation": "cluster_network"
+}
+```
+
+### Response: 200
+```json
+{
+	"nodes": [
+		{
+			"name": "local_node",
+			"response_time": 4,
+			"connected_nodes": ["ec2-3-142-255-78"],
+			"routes": [
+				{
+					"host": "3.142.255.78",
+					"port": 9932
+				}
+			]
+		},
+		{
+			"name": "ec2-3-142-255-78",
+			"response_time": 57,
+			"connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"],
+			"routes": []
+		}
+	]
+}
+```
+
+---
+
+## Remove Node
+Removes a HarperDB instance and associated subscriptions from the cluster. Learn more about [HarperDB clustering here](../clustering/).
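+
+A removal can be sanity-checked by following it with `cluster_status` (above); a minimal `curl` sketch, assuming a local instance on the default operations port 9925 and placeholder credentials:
+
+```bash
+# De-register the node, then confirm it no longer appears in the connections list
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "remove_node", "node_name": "ec2-3-22-181-22"}'
+
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "cluster_status"}'
+```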
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `remove_node`
+* node_name _(required)_ - the name of the node you are de-registering
+
+### Body
+```json
+{
+	"operation": "remove_node",
+	"node_name": "ec2-3-22-181-22"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Successfully removed 'ec2-3-22-181-22' from manifest"
+}
+```
+
+---
+
+## Configure Cluster
+Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup.
+Learn more about [HarperDB clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `configure_cluster`
+* connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node
+
+### Body
+```json
+{
+	"operation": "configure_cluster",
+	"connections": [
+		{
+			"node_name": "ec2-3-137-184-8",
+			"subscriptions": [
+				{
+					"schema": "dev",
+					"table": "dog",
+					"subscribe": true,
+					"publish": false
+				}
+			]
+		},
+		{
+			"node_name": "ec2-18-223-239-195",
+			"subscriptions": [
+				{
+					"schema": "dev",
+					"table": "dog",
+					"subscribe": true,
+					"publish": true
+				}
+			]
+		}
+	]
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Cluster successfully configured."
+}
+```
+
+---
+
+## Purge Stream
+
+Purges messages from a stream.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `purge_stream`
+* database _(required)_ - the name of the database where the stream's table resides
+* table _(required)_ - the name of the table the stream belongs to
+* options _(optional)_ - control how many messages get purged. Options are:
+	* `keep` - purge will keep this many most recent messages
+	* `seq` - purge all messages up to, but not including, this sequence
+
+### Body
+```json
+{
+	"operation": "purge_stream",
+	"database": "dev",
+	"table": "dog",
+	"options": {
+		"keep": 100
+	}
+}
+```
+
+---
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/components.md b/site/versioned_docs/version-4.3/developers/operations-api/components.md
new file mode 100644
index 00000000..17ba5f0a
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/operations-api/components.md
@@ -0,0 +1,291 @@
+---
+title: Components
+---
+
+# Components
+
+## Add Component
+
+Creates a new component project in the component root directory using a predefined template.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_component`
+* project _(required)_ - the name of the project you wish to create
+
+### Body
+```json
+{
+	"operation": "add_component",
+	"project": "my-component"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Successfully added project: my-component"
+}
+```
+---
+## Deploy Component
+
+Deploys a component using either a base64-encoded string representation of a `.tar` file (the output from `package_component`) or a package value, which can be any valid NPM reference, such as a GitHub repo, an NPM package, a tarball, a local directory or a website.\
+
+If deploying with the `payload` option, HarperDB will decode the base64-encoded string, reconstitute the .tar file of your project folder, and extract it to the component root project directory.\
+
+If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`.
Then npm install will be utilized to install the component in the `node_modules` directory located in the hdb root. The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM-registered packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers. Here is an example URL package reference to a (public) Git repository that doesn't require authentication:
+```
+https://github.com/HarperDB/application-template#semver:v1.0.0
+```
+or this can be shortened to:
+```
+HarperDB/application-template#semver:v1.0.0
+```
+
+You can also install from a private repository if you have SSH keys installed on the server:
+```
+git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0
+```
+Or you can use a GitHub token:
+```
+https://<your-token>@github.com/my-org/my-app#semver:v1.0.0
+```
+Or you can use a GitLab Project Access Token:
+```
+https://my-project:<project-token>@gitlab.com/my-group/my-project#semver:v1.0.0
+```
+Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node_modules in the hdb root directory (typically `~/hdb/node_modules`).
+
+_Note: After deploying a component, a restart may be required_
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `deploy_component`
+* project _(required)_ - the name of the project you wish to deploy
+* package _(optional)_ - this can be any valid GitHub or NPM reference
+* payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+### Body
+
+```json
+{
+	"operation": "deploy_component",
+	"project": "my-component",
+	"payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+```json
+{
+	"operation": "deploy_component",
+	"project": "my-component",
+	"package": "HarperDB/application-template"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deployed: my-component"
+}
+```
+---
+## Package Component
+
+Creates a temporary `.tar` file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name and the payload (the encoded string).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `package_component`
+* project _(required)_ - the name of the project you wish to package
+* skip_node_modules _(optional)_ - if true, the project's node_modules directory will be excluded from the tar file.
Must be a boolean
+
+### Body
+
+```json
+{
+	"operation": "package_component",
+	"project": "my-component",
+	"skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+	"project": "my-component",
+	"payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
+}
+```
+---
+## Drop Component
+
+Deletes a file from inside the component project or deletes the complete project.
+
+**If just `project` is provided it will delete all of that project's local files and folders**
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_component`
+* project _(required)_ - the name of the project you wish to delete or to delete from if using the `file` parameter
+* file _(optional)_ - the path relative to your project folder of the file you wish to delete
+
+### Body
+
+```json
+{
+	"operation": "drop_component",
+	"project": "my-component",
+	"file": "utils/myUtils.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully dropped: my-component/utils/myUtils.js"
+}
+```
+---
+## Get Components
+
+Gets all local component files and folders and any component config from `harperdb-config.yaml`.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_components`
+
+### Body
+
+```json
+{
+	"operation": "get_components"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"name": "components",
+	"entries": [
+		{
+			"package": "HarperDB/application-template",
+			"name": "deploy-test-gh"
+		},
+		{
+			"package": "@fastify/compress",
+			"name": "fast-compress"
+		},
+		{
+			"name": "my-component",
+			"entries": [
+				{
+					"name": "LICENSE",
+					"mtime": "2023-08-22T16:00:40.286Z",
+					"size": 1070
+				},
+				{
+					"name": "index.md",
+					"mtime": "2023-08-22T16:00:40.287Z",
+					"size": 1207
+				},
+				{
+					"name": "config.yaml",
+					"mtime": "2023-08-22T16:00:40.287Z",
+					"size": 1069
+				},
+				{
+					"name": "package.json",
+					"mtime": "2023-08-22T16:00:40.288Z",
+					"size": 145
+				},
+				{
+					"name": "resources.js",
+					"mtime": "2023-08-22T16:00:40.289Z",
+					"size": 583
+				},
+				{
+					"name": "schema.graphql",
+					"mtime": "2023-08-22T16:00:40.289Z",
+					"size": 466
+				},
+				{
+					"name": "utils",
+					"entries": [
+						{
+							"name": "commonUtils.js",
+							"mtime": "2023-08-22T16:00:40.289Z",
+							"size": 583
+						}
+					]
+				}
+			]
+		}
+	]
+}
+```
+---
+## Get Component File
+
+Gets the contents of a file inside a component project.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_component_file`
+* project _(required)_ - the name of the project where the file is located
+* file _(required)_ - the path relative to your project folder of the file you wish to view
+* encoding _(optional)_ - the encoding that will be passed to the read file call. Defaults to `utf8`
+
+### Body
+
+```json
+{
+	"operation": "get_component_file",
+	"project": "my-component",
+	"file": "resources.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!'
};\n\t}\n}"
+}
+```
+---
+## Set Component File
+
+Creates or updates a file inside a component project.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_component_file`
+* project _(required)_ - the name of the project the file is located in
+* file _(required)_ - the path relative to your project folder of the file you wish to set
+* payload _(required)_ - what will be written to the file
+* encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8`
+
+### Body
+
+```json
+{
+	"operation": "set_component_file",
+	"project": "my-component",
+	"file": "test.js",
+	"payload": "console.log('hello world')"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully set component: test.js"
+}
+```
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/custom-functions.md b/site/versioned_docs/version-4.3/developers/operations-api/custom-functions.md
new file mode 100644
index 00000000..bf9537fc
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/operations-api/custom-functions.md
@@ -0,0 +1,276 @@
+---
+title: Custom Functions
+---
+
+# Custom Functions
+
+## Custom Functions Status
+
+Returns the state of the Custom Functions server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `custom_functions_status`
+
+### Body
+```json
+{
+	"operation": "custom_functions_status"
+}
+```
+
+### Response: 200
+```json
+{
+	"is_enabled": true,
+	"port": 9926,
+	"directory": "/Users/myuser/hdb/custom_functions"
+}
+```
+
+---
+
+## Get Custom Functions
+
+Returns an array of projects within the Custom Functions root project directory. Each project has details including each of the files in the routes and helpers directories, and the total file count in the static folder.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_custom_functions`
+
+### Body
+
+```json
+{
+	"operation": "get_custom_functions"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"dogs": {
+		"routes": ["examples"],
+		"helpers": ["example"],
+		"static": 3
+	}
+}
+```
+
+---
+
+## Get Custom Function
+
+Returns the content of the specified file as text. HarperDB Studio uses this call to render the file content in its built-in code editor.
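+
+Because the file content comes back as a JSON-escaped string in `message`, a common pattern is to unescape it client-side; a sketch using `curl` with `jq` (connection details and credentials are placeholders):
+
+```bash
+# Fetch the helper and write the decoded source to a local file
+curl -s -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "get_custom_function", "project": "dogs", "type": "helpers", "file": "example"}' \
+  | jq -r '.message' > example.js
+```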
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_custom_function` +* project _(required)_ - the name of the project containing the file for which you wish to get content +* type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers +* file _(required)_ - The name of the file for which you wish to get content - should not include the file extension (which is always .js) + +### Body + +```json +{ + "operation": "get_custom_function", + "project": "dogs", + "type": "helpers", + "file": "example" +} +``` + +### Response: 200 + +```json +{ + "message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n" +} +``` + +--- + +## Set Custom Function + +Updates the content of the specified file. HarperDB Studio uses this call to save any changes made through its built-in code editor. 
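+
+Since `function_content` must be a JSON-escaped string, it is easiest to build the request body with a tool that handles the escaping, for example `jq` (1.6+ for `--rawfile`); the connection details and file name here are placeholders:
+
+```bash
+# Read example.js verbatim into the function_content field, then POST the body
+jq -n --rawfile code ./example.js \
+  '{operation: "set_custom_function", project: "dogs", type: "helpers", file: "example", function_content: $code}' \
+  | curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+      -H 'Content-Type: application/json' \
+      --data-binary @-
+```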
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_custom_function`
+* project _(required)_ - the name of the project containing the file for which you wish to set content
+* type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers
+* file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js)
+* function_content _(required)_ - the content you wish to save into the specified file
+
+### Body
+
+```json
+{
+	"operation": "set_custom_function",
+	"project": "dogs",
+	"type": "helpers",
+	"file": "example",
+	"function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully updated custom function: example.js"
+}
+```
+
+---
+
+## Drop Custom Function
+
+Deletes the specified file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function`
+* project _(required)_ - the name of the project containing the file you wish to delete
+* type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers
+* file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+	"operation": "drop_custom_function",
+	"project": "dogs",
+	"type": "helpers",
+	"file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deleted custom function: example.js"
+}
+```
+
+---
+
+## Add Custom Function Project
+
+Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is publicly available here: https://github.com/HarperDB/harperdb-custom-functions-template.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_custom_function_project`
+* project _(required)_ - the name of the project you wish to create
+
+### Body
+
+```json
+{
+	"operation": "add_custom_function_project",
+	"project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully created custom function project: dogs"
+}
+```
+
+---
+
+## Drop Custom Function Project
+
+Deletes the specified project folder and all of its contents.
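+
+Because the deletion is permanent, one cautious pattern is to package the project first (see `package_custom_function_project` below) and keep the payload as a backup; a sketch with placeholder connection details:
+
+```bash
+# Save a base64 tarball payload of the project, then drop it
+curl -s -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "package_custom_function_project", "project": "dogs", "skip_node_modules": true}' \
+  | jq -r '.payload' > dogs.payload.b64
+
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "drop_custom_function_project", "project": "dogs"}'
+```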
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function_project`
+* project _(required)_ - the name of the project you wish to delete
+
+### Body
+
+```json
+{
+	"operation": "drop_custom_function_project",
+	"project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deleted project: dogs"
+}
+```
+
+---
+
+## Package Custom Function Project
+
+Creates a .tar file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name, the payload (the encoded string) and the path to the temporary file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `package_custom_function_project`
+* project _(required)_ - the name of the project you wish to package up for deployment
+* skip_node_modules _(optional)_ - if true, the project's node_modules directory will be excluded from the tar file. Must be a boolean.
+
+### Body
+
+```json
+{
+	"operation": "package_custom_function_project",
+	"project": "dogs",
+	"skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+	"project": "dogs",
+	"payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
+	"file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar"
+}
+```
+
+---
+
+## Deploy Custom Function Project
+
+Takes the output of `package_custom_function_project`, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the Custom Functions root project directory.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `deploy_custom_function_project`
+* project _(required)_ - the name of the project you wish to deploy. Must be a string
+* payload _(required)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+
+### Body
+
+```json
+{
+	"operation": "deploy_custom_function_project",
+	"project": "dogs",
+	"payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deployed project: dogs"
+}
+```
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/databases-and-tables.md b/site/versioned_docs/version-4.3/developers/operations-api/databases-and-tables.md
new file mode 100644
index 00000000..68f30089
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/operations-api/databases-and-tables.md
@@ -0,0 +1,362 @@
+---
+title: Databases and Tables
+---
+
+# Databases and Tables
+
+## Describe All
+Returns the definitions of all databases and tables within the instance. Record counts above 5000 records are estimated, as determining the exact count can be expensive. When the record count is estimated, this is indicated by the inclusion of a confidence interval of `estimated_record_range`. If you need the exact count, you can include `"exact_count": true` in the operation, but be aware that this requires a full table scan (which may be expensive).
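+
+For example, to request exact counts anyway (a sketch assuming a local instance on the default operations port 9925 and placeholder credentials):
+
+```bash
+# Forces a full table scan so record_count is exact rather than estimated
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "describe_all", "exact_count": true}'
+```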
+
+* operation _(required)_ - must always be `describe_all`
+
+### Body
+```json
+{
+	"operation": "describe_all"
+}
+```
+
+### Response: 200
+```json
+{
+	"dev": {
+		"dog": {
+			"schema": "dev",
+			"name": "dog",
+			"hash_attribute": "id",
+			"audit": true,
+			"schema_defined": false,
+			"attributes": [
+				{
+					"attribute": "id",
+					"indexed": true,
+					"is_primary_key": true
+				},
+				{
+					"attribute": "__createdtime__",
+					"indexed": true
+				},
+				{
+					"attribute": "__updatedtime__",
+					"indexed": true
+				},
+				{
+					"attribute": "type",
+					"indexed": true
+				}
+			],
+			"clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+			"record_count": 4000,
+			"estimated_record_range": [3976, 4033],
+			"last_updated_record": 1697658683698.4504
+		}
+	}
+}
+```
+
+---
+
+## Describe Database
+Returns the definitions of all tables within the specified database.
+
+* operation _(required)_ - must always be `describe_database`
+* database _(optional)_ - the database you wish to describe. The default is `data`
+
+### Body
+```json
+{
+	"operation": "describe_database",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+	"dog": {
+		"schema": "dev",
+		"name": "dog",
+		"hash_attribute": "id",
+		"audit": true,
+		"schema_defined": false,
+		"attributes": [
+			{
+				"attribute": "id",
+				"indexed": true,
+				"is_primary_key": true
+			},
+			{
+				"attribute": "__createdtime__",
+				"indexed": true
+			},
+			{
+				"attribute": "__updatedtime__",
+				"indexed": true
+			},
+			{
+				"attribute": "type",
+				"indexed": true
+			}
+		],
+		"clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+		"record_count": 4000,
+		"estimated_record_range": [3976, 4033],
+		"last_updated_record": 1697658683698.4504
+	}
+}
+```
+
+---
+
+## Describe Table
+Returns the definition of the specified table.
+
+* operation _(required)_ - must always be `describe_table`
+* table _(required)_ - table you wish to describe
+* database _(optional)_ - database where the table you wish to describe lives. The default is `data`
+
+### Body
+```json
+{
+	"operation": "describe_table",
+	"table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+	"schema": "dev",
+	"name": "dog",
+	"hash_attribute": "id",
+	"audit": true,
+	"schema_defined": false,
+	"attributes": [
+		{
+			"attribute": "id",
+			"indexed": true,
+			"is_primary_key": true
+		},
+		{
+			"attribute": "__createdtime__",
+			"indexed": true
+		},
+		{
+			"attribute": "__updatedtime__",
+			"indexed": true
+		},
+		{
+			"attribute": "type",
+			"indexed": true
+		}
+	],
+	"clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+	"record_count": 4000,
+	"estimated_record_range": [3976, 4033],
+	"last_updated_record": 1697658683698.4504
+}
+```
+
+---
+
+## Create Database
+Create a new database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `create_database`
+* database _(optional)_ - name of the database you are creating. The default is `data`
+
+### Body
+```json
+{
+	"operation": "create_database",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "database 'dev' successfully created"
+}
+```
+
+---
+
+## Drop Database
+Drop an existing database. NOTE: Dropping a database will delete all tables and all of their records in that database.
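+
+Because this permanently deletes every table in the database, it can be worth taking a snapshot first with `get_backup` (described below); a sketch with placeholder connection details and credentials:
+
+```bash
+# Snapshot the database to a local file, then drop it
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "get_backup", "database": "dev"}' \
+  --output dev.backup
+
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "drop_database", "database": "dev"}'
+```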
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_database`
+* database _(required)_ - name of the database you are dropping
+
+### Body
+```json
+{
+	"operation": "drop_database",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "successfully deleted 'dev'"
+}
+```
+
+---
+
+## Create Table
+Create a new table within a database.
+
+_Operation is restricted to super_user roles only_
+
+
+* operation _(required)_ - must always be `create_table`
+* database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided, it will default to `data`.
+* table _(required)_ - name of the table you are creating
+* primary_key _(required)_ - primary key for the table
+* attributes _(optional)_ - an array of attributes that specifies the schema for the table, i.e., the set of attributes for the table. When attributes are supplied the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted. Each attribute is specified as:
+	* name _(required)_ - the name of the attribute
+	* indexed _(optional)_ - indicates if the attribute should be indexed
+	* type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any)
+* expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted (records are not evicted on any timer if not specified). This is specified in seconds.
+
+### Body
+```json
+{
+	"operation": "create_table",
+	"database": "dev",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "table 'dev.dog' successfully created."
+}
+```
+
+---
+
+## Drop Table
+Drop an existing database table. NOTE: Dropping a table will delete all associated records in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_table`
+* database _(optional)_ - database where the table you are dropping lives. The default is `data`
+* table _(required)_ - name of the table you are dropping
+
+### Body
+
+```json
+{
+	"operation": "drop_table",
+	"database": "dev",
+	"table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "successfully deleted table 'dev.dog'"
+}
+```
+
+---
+
+## Create Attribute
+Create a new attribute within the specified table. **The create_attribute operation can be used by admins wishing to pre-define database values for setting role-based permissions or for any other reason.**
+
+_Note: HarperDB will automatically create new attributes on insert and update if they do not already exist within the database._
+
+* operation _(required)_ - must always be `create_attribute`
+* database _(optional)_ - name of the database of the table to which you want to add your attribute. The default is `data`
+* table _(required)_ - name of the table where you want your attribute to live
+* attribute _(required)_ - name for the attribute
+
+### Body
+```json
+{
+	"operation": "create_attribute",
+	"database": "dev",
+	"table": "dog",
+	"attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "inserted 1 of 1 records",
+	"skipped_hashes": [],
+	"inserted_hashes": [
+		"383c0bef-5781-4e1c-b5c8-987459ad0831"
+	]
+}
+```
+
+---
+
+## Drop Attribute
+Drop an existing attribute from the specified table.
NOTE: Dropping an attribute will delete all associated attribute values in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_attribute`
+* database _(optional)_ - database where the table you are dropping the attribute from lives. The default is `data`
+* table _(required)_ - table where the attribute you are dropping lives
+* attribute _(required)_ - attribute that you intend to drop
+
+### Body
+
+```json
+{
+	"operation": "drop_attribute",
+	"database": "dev",
+	"table": "dog",
+	"attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "successfully deleted attribute 'is_adorable'"
+}
+```
+
+---
+
+## Get Backup
+This will return a snapshot of the requested database. This provides a means for backing up the database through the operations API. The response will be the raw database file (in binary format), which can later be restored as a database file by copying into the appropriate hdb/databases directory (with HarperDB not running). The returned file is a snapshot of the database at the moment in time that the get_backup operation begins. This also supports backing up individual tables in a database. However, this is a more expensive operation than backing up a database in whole, and will lose any transactional atomicity between writes across tables, so generally it is recommended that you back up the entire database.
+
+It is important to note that trying to copy a database file that is in use (HarperDB actively running and writing to the file) using standard file copying tools is not safe (the copied file will likely be corrupt), which is why using this snapshot operation is recommended for backups (volume snapshots are also a good way to back up HarperDB databases).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `get_backup`
+* database _(required)_ - this is the database that will be snapshotted and returned
+* table _(optional)_ - this will specify a specific table to back up
+* tables _(optional)_ - this will specify a specific set of tables to back up
+
+### Body
+
+```json
+{
+	"operation": "get_backup",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```
+The database in raw binary data format
+```
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/index.md b/site/versioned_docs/version-4.3/developers/operations-api/index.md
new file mode 100644
index 00000000..cf2db22d
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/operations-api/index.md
@@ -0,0 +1,51 @@
+---
+title: Operations API
+---
+
+# Operations API
+
+The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling HarperDB. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../../deployments/configuration), on the root path, where the body is the operations object. These requests need to be authenticated, which can be done with [basic auth](../../../developers/security/basic-auth) or [JWT authentication](../../../developers/security/jwt-auth).
For example, a request to create a table would be performed as:
+
+```http
+POST http://my-harperdb-server:9925/
+Authorization: Basic YourBase64EncodedInstanceUser:Pass
+Content-Type: application/json
+
+{
+	"operation": "create_table",
+	"table": "my-table"
+}
+```
+
+The operations API reference is available below and categorized by topic:
+
+* [Quick Start Examples](./quickstart-examples)
+* [Databases and Tables](./databases-and-tables)
+* [NoSQL Operations](./nosql-operations)
+* [Bulk Operations](./bulk-operations)
+* [Users and Roles](./users-and-roles)
+* [Clustering](./clustering)
+* [Components](./components)
+* [Registration](./registration)
+* [Jobs](./jobs)
+* [Logs](./logs)
+* [Utilities](./utilities)
+* [Token Authentication](./token-authentication)
+* [SQL Operations](./sql-operations)
+* [Advanced JSON SQL Examples](./advanced-json-sql-examples)
+* [Past Release API Documentation](https://olddocs.harperdb.io)
+
+## More Examples
+
+Here is an example of using `curl` to make an operations API request:
+
+```bash
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+"operation": "create_schema",
+"schema": "dev"
+}'
+```
diff --git a/site/versioned_docs/version-4.3/developers/operations-api/jobs.md b/site/versioned_docs/version-4.3/developers/operations-api/jobs.md
new file mode 100644
index 00000000..8b05357f
--- /dev/null
+++ b/site/versioned_docs/version-4.3/developers/operations-api/jobs.md
@@ -0,0 +1,82 @@
+---
+title: Jobs
+---
+
+# Jobs
+
+## Get Job
+Returns job status, metrics, and messages for the specified job ID.
+
+* operation _(required)_ - must always be `get_job`
+* id _(required)_ - the id of the job you wish to view
+
+### Body
+
+```json
+{
+	"operation": "get_job",
+	"id": "4a982782-929a-4507-8794-26dae1132def"
+}
+```
+
+### Response: 200
+```json
+[
+	{
+		"__createdtime__": 1611615798782,
+		"__updatedtime__": 1611615801207,
+		"created_datetime": 1611615798774,
+		"end_datetime": 1611615801206,
+		"id": "4a982782-929a-4507-8794-26dae1132def",
+		"job_body": null,
+		"message": "successfully loaded 350 of 350 records",
+		"start_datetime": 1611615798805,
+		"status": "COMPLETE",
+		"type": "csv_url_load",
+		"user": "HDB_ADMIN",
+		"start_datetime_converted": "2021-01-25T23:03:18.805Z",
+		"end_datetime_converted": "2021-01-25T23:03:21.206Z"
+	}
+]
+```
+
+---
+
+## Search Jobs By Start Date
+Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
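+
+For example, to search the last 24 hours, the window can be computed in the shell (GNU `date` shown; the flags differ on macOS/BSD, and connection details are placeholders):
+
+```bash
+# Build an ISO window matching the from_date/to_date format shown below
+FROM=$(date -u -d '24 hours ago' +%Y-%m-%dT%H:%M:%S.000%z)
+TO=$(date -u +%Y-%m-%dT%H:%M:%S.000%z)
+curl -X POST http://localhost:9925 -u HDB_ADMIN:password \
+  -H 'Content-Type: application/json' \
+  -d "{\"operation\": \"search_jobs_by_start_date\", \"from_date\": \"$FROM\", \"to_date\": \"$TO\"}"
+```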
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `search_jobs_by_start_date` +* from_date _(required)_ - the date you wish to start the search +* to_date _(required)_ - the date you wish to end the search + +### Body +```json +{ + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" +} +``` + +### Response: 200 +```json +[ + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } +] +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/developers/operations-api/logs.md b/site/versioned_docs/version-4.3/developers/operations-api/logs.md new file mode 100644 index 00000000..a6c39c46 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/logs.md @@ -0,0 +1,755 @@ +--- +title: Logs +--- + +# Logs + +## Read HarperDB Log +Returns log outputs from the primary HarperDB log based on the provided search criteria. Read more about HarperDB logging here: https://docs.harperdb.io/docs/logging#read-logs-via-the-api. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_log` +* start _(optional)_ - result to start with. Default is 0, the first log in `hdb.log`. Must be a number +* limit _(optional)_ - number of results returned. Default behavior is 1000. Must be a number +* level _(optional)_ - error level to filter on. Default behavior is all levels. Must be `notify`, `error`, `warn`, `info`, `debug` or `trace` +* from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is first log in `hdb.log` +* until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is last log in `hdb.log` +* order _(optional)_ - order to display logs, `desc` or `asc`, by timestamp. By default, will maintain `hdb.log` order + +### Body + +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` + +### Response: 200 +```json +[ + { + "level": "notify", + "message": "Connected to cluster server.", + "timestamp": "2021-01-25T23:03:20.710Z", + "thread": "main/0", + "tags": [] + }, + { + "level": "warn", + "message": "Login failed", + "timestamp": "2021-01-25T22:24:45.113Z", + "thread": "http/9", + "tags": [] + }, + { + "level": "error", + "message": "unknown attribute 'name and breed'", + "timestamp": "2021-01-25T22:23:24.167Z", + "thread": "http/9", + "tags": [] + } +] +``` + +--- + +## Read Transaction Log +Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. Read more about HarperDB transaction logs here: https://docs.harperdb.io/docs/transaction-logging#read_transaction_log.
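+ +The `from` and `to` values are millisecond-based epoch timestamps; one way to compute them in a shell (a sketch assuming GNU `date`): + +```bash +# epoch milliseconds for "24 hours ago" and "now" +FROM=$(( $(date -u -d '24 hours ago' +%s) * 1000 )) +TO=$(( $(date -u +%s) * 1000 )) +echo "$FROM $TO" +``` +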
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_transaction_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* from _(optional)_ - time format must be millisecond-based epoch in UTC +* to _(optional)_ - time format must be millisecond-based epoch in UTC +* limit _(optional)_ - max number of logs you want to receive. Must be a number + +### Body + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1560249020865, + "to": 1660585656639, + "limit": 10 +} +``` + +### Response: 200 +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619813, + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 
1660165619798, + "__createdtime__": 1660165619798 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +--- + +## Delete Transaction Logs Before +Deletes transaction log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `delete_transaction_logs_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126" +} +``` + +--- + +## Read Audit Log +AuditLog must be enabled in the HarperDB configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search_type and search_values fields. Read more about HarperDB transaction logs here: https://docs.harperdb.io/docs/transaction-logging#read_audit_log. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username` +* search_values _(optional)_ - an array of strings or numbers relating to search_type + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +### Response: 200 +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + } +] +``` + + +--- + +## Read Audit Log by timestamp +AuditLog must be enabled in the HarperDB configuration file to make this request. Returns the transactions logged for the specified database table within the specified time window.
Read more about HarperDB transaction logs here: https:/docs.harperdb.io/docs/transaction-logging#read_audit_log. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - timestamp +* search_values _(optional)_ - an array containing a maximum of two values [`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. + * Timestamp format is millisecond-based epoch in UTC + * If no items are supplied then all transactions are returned + * If only one entry is supplied then all transactions after the supplied timestamp will be returned + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558, + 1660585759710.56 + ] +} +``` + +### Response: 200 +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } +] +``` + + +--- + +## Read Audit Log by username +AuditLog must be enabled in the HarperDB configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. Read more about HarperDB transaction logs here: https:/docs.harperdb.io/docs/transaction-logging#read_audit_log. 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - username +* search_values _(optional)_ - the HarperDB user for whom you would like to view transactions + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +### Response: 200 +```json +{ + "admin": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + + +--- + +## Read Audit Log by hash_value +AuditLog must be enabled in the HarperDB configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). Read more about HarperDB transaction logs here: https:/docs.harperdb.io/docs/transaction-logging#read_audit_log. 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search_type _(optional)_ - hash_value +* search_values _(optional)_ - an array of hash_attributes for which you wish to see transaction logs + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +### Response: 200 +```json +{ + "318": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +--- + +## Delete Audit Logs Before +AuditLog must be enabled in the HarperDB configuration file to make this request. Deletes audit log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `delete_audit_logs_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1660585759710.56 +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" +} +``` + + diff --git a/site/versioned_docs/version-4.3/developers/operations-api/nosql-operations.md b/site/versioned_docs/version-4.3/developers/operations-api/nosql-operations.md new file mode 100644 index 00000000..47db9d1e --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/nosql-operations.md @@ -0,0 +1,413 @@ +--- +title: NoSQL Operations +--- + +# NoSQL Operations + +## Insert + +Adds one or more rows of data to a database table. Primary keys of the inserted JSON record may be supplied on insert. If a primary key is not provided, then a GUID will be generated for each record. + +* operation _(required)_ - must always be `insert` +* database _(optional)_ - database where the table you are inserting records into lives. The default is `data` +* table _(required)_ - table where you want to insert records +* records _(required)_ - array of one or more records for insert + +### Body + +```json +{ + "operation": "insert", + "database": "dev", + "table": "dog", + "records": [ + { + "id": 8, + "dog_name": "Harper", + "breed_id": 346, + "age": 7 + }, + { + "id": 9, + "dog_name": "Penny", + "breed_id": 154, + "age": 7 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 2 of 2 records", + "inserted_hashes": [ + 8, + 9 + ], + "skipped_hashes": [] +} +``` + +--- + +## Update + +Changes the values of specified attributes in one or more rows in a database table as identified by the primary key. 
NOTE: Primary key of the updated JSON record(s) MUST be supplied on update. + +* operation _(required)_ - must always be `update` +* database _(optional)_ - database of the table you are updating records in. The default is `data` +* table _(required)_ - table where you want to update records +* records _(required)_ - array of one or more records for update + +### Body + +```json +{ + "operation": "update", + "database": "dev", + "table": "dog", + "records": [ + { + "id": 1, + "weight_lbs": 55 + }, + { + "id": 2, + "owner": "Kyle B", + "weight_lbs": 35 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "updated 2 of 2 records", + "update_hashes": [ + 1, + 2 + ], + "skipped_hashes": [] +} +``` + +--- + +## Upsert + +Changes the values of specified attributes for rows with matching primary keys that exist in the table. Adds rows to the database table for primary keys that do not exist or are not provided. + +* operation _(required)_ - must always be `upsert` +* database _(optional)_ - database of the table you are updating records in. The default is `data` +* table _(required)_ - table where you want to update records +* records _(required)_ - array of one or more records for upsert + +### Body + +```json +{ + "operation": "upsert", + "database": "dev", + "table": "dog", + "records": [ + { + "id": 8, + "weight_lbs": 155 + }, + { + "name": "Bill", + "breed": "Pit Bull", + "id": 10, + "Age": 11, + "weight_lbs": 155 + }, + { + "name": "Harper", + "breed": "Mutt", + "age": 5, + "weight_lbs": 155 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "upserted 3 of 3 records", + "upserted_hashes": [ + 8, + 10, + "ea06fc8e-717b-4c6c-b69d-b29014054ab7" + ] +} +``` + +--- + +## Delete + +Removes one or more rows of data from a specified table. + +* operation _(required)_ - must always be `delete` +* database _(optional)_ - database where the table you are deleting records lives. The default is `data` +* table _(required)_ - table where you want to delete records +* ids _(required)_ - array of one or more primary key values, which identifies records to delete + +### Body + +```json +{ + "operation": "delete", + "database": "dev", + "table": "dog", + "ids": [ + 1, + 2 + ] +} +``` + +### Response: 200 + +```json +{ + "message": "2 of 2 records successfully deleted", + "deleted_hashes": [ + 1, + 2 + ], + "skipped_hashes": [] +} +``` + +--- + +## Search By ID + +Returns data from a table for one or more primary keys. + +* operation _(required)_ - must always be `search_by_id` +* database _(optional)_ - database where the table you are searching lives. The default is `data` +* table _(required)_ - table you wish to search +* ids _(required)_ - array of primary keys to retrieve +* get_attributes _(required)_ - define which attributes you want returned. _Use `['*']` to return all attributes_ + +### Body + +```json +{ + "operation": "search_by_id", + "database": "dev", + "table": "dog", + "ids": [ + 1, + 2 + ], + "get_attributes": [ + "dog_name", + "breed_id" + ] +} +``` + +### Response: 200 + +```json +[ + { + "dog_name": "Penny", + "breed_id": 154 + }, + { + "dog_name": "Harper", + "breed_id": 346 + } +] +``` + +--- + +## Search By Value + +Returns data from a table for a matching value. + +* operation _(required)_ - must always be `search_by_value` +* database _(optional)_ - database where the table you are searching lives.
The default is `data` +* table _(required)_ - table you wish to search +* search_attribute _(required)_ - the attribute you wish to search, can be any attribute +* search_value _(required)_ - value you wish to search - wildcards are allowed +* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes + +### Body + +```json +{ + "operation": "search_by_value", + "database": "dev", + "table": "dog", + "search_attribute": "owner_name", + "search_value": "Ky*", + "get_attributes": [ + "id", + "dog_name" + ] +} +``` + +### Response: 200 + +```json +[ + { + "id": 1, + "dog_name": "Penny" + }, + { + "id": 6, + "dog_name": "Kato" + } +] +``` + +--- + +## Search By Conditions + +Returns data from a table for one or more matching conditions. This supports grouping of conditions to indicate order of operations as well; see the illustrative sketch after this list. + +* operation _(required)_ - must always be `search_by_conditions` +* database _(optional)_ - database where the table you are searching lives. The default is `data` +* table _(required)_ - table you wish to search +* operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and` +* offset _(optional)_ - the number of records that the query results will skip. The default is `0` +* limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit +* sort _(optional)_ - an object that indicates the sort order. It has the following properties: + * attribute _(required)_ - the attribute to sort by + * descending _(optional)_ - if true, will sort in descending order (defaults to ascending order) + * next _(optional)_ - defines the next sort object, used to break ties when multiple records have the same value for the first attribute (follows the same structure as `sort`, and can chain additional attributes recursively) +* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes +* conditions _(required)_ - the array of condition objects, specified below, to filter by. Must include one or more objects in the array that are a condition or a grouped set of conditions. A condition has the following properties: + * search_attribute _(required)_ - the attribute you wish to search, can be any attribute + * search_type _(required)_ - the type of search to perform - `equals`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between` + * search_value _(required)_ - case-sensitive value you wish to search. If the `search_type` is `between` then use an array of two values to search between + Or a grouped set of conditions has the following properties: + * operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and` + * conditions _(required)_ - the array of condition objects as described above.
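+ +To make the grouping in the example concrete, the request in the Body below filters roughly like this illustrative SQL predicate (a conceptual sketch only, not an operation you send to the API): + +```sql +-- top-level operator "and", with one grouped "or" set +WHERE age BETWEEN 5 AND 8 + AND weight_lbs > 40 + AND (adorable = true OR lovable = true) +``` +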
+### Body + +```json +{ + "operation": "search_by_conditions", + "database": "dev", + "table": "dog", + "operator": "and", + "offset": 0, + "limit": 10, + "sort": { + "attribute": "id", + "next": { + "attribute": "age", + "descending": true + } + }, + "get_attributes": [ + "*" + ], + "conditions": [ + { + "search_attribute": "age", + "search_type": "between", + "search_value": [ + 5, + 8 + ] + }, + { + "search_attribute": "weight_lbs", + "search_type": "greater_than", + "search_value": 40 + }, + { + "operator": "or", + "conditions": [ + { + "search_attribute": "adorable", + "search_type": "equals", + "search_value": true + }, + { + "search_attribute": "lovable", + "search_type": "equals", + "search_value": true + } + ] + } + ] +} +``` + +### Response: 200 + +```json +[ + { + "__createdtime__": 1620227719791, + "__updatedtime__": 1620227719791, + "adorable": true, + "age": 7, + "breed_id": 346, + "dog_name": "Harper", + "id": 2, + "owner_name": "Stephen", + "weight_lbs": 55 + }, + { + "__createdtime__": 1620227719792, + "__updatedtime__": 1620227719792, + "adorable": true, + "age": 7, + "breed_id": 348, + "dog_name": "Alby", + "id": 3, + "owner_name": "Kaylan", + "weight_lbs": 84 + }, + { + "__createdtime__": 1620227719792, + "__updatedtime__": 1620227719792, + "adorable": true, + "age": 6, + "breed_id": 347, + "dog_name": "Billy", + "id": 4, + "owner_name": "Zach", + "weight_lbs": 60 + }, + { + "__createdtime__": 1620227719792, + "__updatedtime__": 1620227719792, + "adorable": true, + "age": 5, + "breed_id": 250, + "dog_name": "Gemma", + "id": 8, + "owner_name": "Stephen", + "weight_lbs": 55 + }, + { + "__createdtime__": 1620227719792, + "__updatedtime__": 1620227719792, + "adorable": true, + "age": 8, + "breed_id": 104, + "dog_name": "Bode", + "id": 11, + "owner_name": "Margo", + "weight_lbs": 75 + } +] +``` diff --git a/site/versioned_docs/version-4.3/developers/operations-api/quickstart-examples.md b/site/versioned_docs/version-4.3/developers/operations-api/quickstart-examples.md new file mode 100644 index 00000000..e1ef734a --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/quickstart-examples.md @@ -0,0 +1,387 @@ +--- +title: Quick Start Examples +--- + +# Quick Start Examples + +HarperDB recommends utilizing [HarperDB Applications](../../developers/applications/) for defining databases, tables, and other functionality. However, this guide is a great way to get started with the HarperDB Operations API. + +## Create dog Table + +We first need to create a table. Since our company is named after our CEO's dog, let's create a table to store all our employees' dogs. We'll call this table `dog`. + +Tables in HarperDB are schema-less, so we don't need to add any attributes other than a primary_key (in pre-4.2 versions this was referred to as the hash_attribute) to create this table. + +HarperDB does offer a `database` parameter that can be used to hold logical groupings of tables. The parameter is optional and if not provided the operation will default to using a database named `data`. + +If you receive an error response, make sure your Basic Authentication user and password match those you entered during the installation process. + +### Body + +```json +{ + "operation": "create_table", + "table": "dog", + "primary_key": "id" +} +``` + +### Response: 200 + +```json +{ + "message": "table 'data.dog' successfully created." +} +``` + +--- + +## Create breed Table +Now that we have a table to store our dog data, we also want to create a table to track known breeds.
Just as with the dog table, the only attribute we need to specify is the `primary_key`. + +### Body + +```json +{ + "operation": "create_table", + "table": "breed", + "primary_key": "id" +} +``` + +### Response: 200 + +```json +{ + "message": "table 'data.breed' successfully created." +} +``` + +--- + +## Insert 1 Dog + +We're ready to add some dog data. Penny is our CTO's pup, so she gets ID 1 or we're all fired. We are specifying attributes in this call, but this doesn't prevent us from specifying additional attributes in subsequent calls. + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Insert Multiple Dogs + +Let's add some more Harper doggies! We can add as many dog objects as we want into the records collection. If you're adding a lot of objects, we would recommend using the .csv upload option (see the next section where we populate the breed table). + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 12 of 12 records", + "inserted_hashes": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "skipped_hashes": [] +} +``` + +--- + +## Bulk Insert Breeds Via CSV + +We need to populate the 'breed' table with some data so we can reference it later. For larger data sets, we recommend using our CSV upload option. + +Each header in a column will be considered as an attribute, and each row in the file will be a row in the table. Simply specify the file path and the table to upload to, and HarperDB will take care of the rest. 
You can pull the breeds.csv file from here: https://s3.amazonaws.com/complimentarydata/breeds.csv + +### Body + +```json +{ + "operation": "csv_url_load", + "table": "breed", + "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369", + "job_id": "e77d63b9-70d5-499c-960f-6736718a4369" +} +``` + +--- + +## Update 1 Dog Using NoSQL + +HarperDB supports NoSQL and SQL commands. We're going to update the dog table to show Penny's last initial using our NoSQL API. + +### Body + +```json +{ + "operation": "update", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny B" + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "updated 1 of 1 records", + "update_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Select a Dog by ID Using SQL + +Now we're going to use a simple SQL SELECT call to pull Penny's updated data. Note we now see Penny's last initial in the dog name. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT * FROM data.dog where id = 1" +} +``` + +### Response: 200 + +```json +[ + { + "owner_name": "Kyle", + "adorable": null, + "breed_id": 154, + "__updatedtime__": 1610749428575, + "dog_name": "Penny B", + "weight_lbs": 38, + "id": 1, + "age": 7, + "__createdtime__": 1610749386566 + } +] +``` + +--- + +## Select Dogs and Join Breed + +Here's a more complex SQL command joining the breed table with the dog table. We will also pull only the pups belonging to Kyle, Zach, and Stephen. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name" +} +``` + +### Response: 200 + +```json +[ + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "name": "LABRADOR / GREAT DANE MIX", + "section": "Mutt" + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "name": "SHORT HAIRED SETTER MIX", + "section": "Mutt" + }, + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "name": "HUSKY MIX", + "section": "Mutt" + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "name": "TERRIER MIX", + "section": "Mutt" + } +] +``` diff --git a/site/versioned_docs/version-4.3/developers/operations-api/registration.md b/site/versioned_docs/version-4.3/developers/operations-api/registration.md new file mode 100644 index 00000000..53d953af --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/registration.md @@ -0,0 +1,67 @@ +--- +title: Registration +--- + +# Registration + + +## Registration Info +Returns the registration data of the HarperDB instance. + +* operation _(required)_ - must always be `registration_info` + +### Body +```json +{ + "operation": "registration_info" +} +``` + +### Response: 200 +```json +{ + "registered": true, + "version": "4.2.0", + "ram_allocation": 2048, + "license_expiration_date": "2022-01-15" +} +``` + +--- + +## Get Fingerprint +Returns the HarperDB fingerprint, uniquely generated based on the machine, for licensing purposes. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_fingerprint` + +### Body + +```json +{ + "operation": "get_fingerprint" +} +``` + +--- + +## Set License +Sets the HarperDB license as generated by HarperDB License Management software.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_license` +* key _(required)_ - your license key +* company _(required)_ - the company that was used in the license + +### Body + +```json +{ + "operation": "set_license", + "key": "", + "company": "" +} +``` + diff --git a/site/versioned_docs/version-4.3/developers/operations-api/sql-operations.md b/site/versioned_docs/version-4.3/developers/operations-api/sql-operations.md new file mode 100644 index 00000000..5958805e --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/sql-operations.md @@ -0,0 +1,122 @@ +--- +title: SQL Operations +--- + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL Operations + +## Select +Executes the provided SQL statement. The SELECT statement is used to query data from the database. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT * FROM dev.dog WHERE id = 1" +} +``` + +### Response: 200 +```json +[ + { + "id": 1, + "age": 7, + "dog_name": "Penny", + "weight_lbs": 38, + "breed_id": 154, + "owner_name": "Kyle", + "adorable": true, + "__createdtime__": 1611614106043, + "__updatedtime__": 1611614119507 + } +] +``` + +--- + +## Insert +Executes the provided SQL statement. The INSERT statement is used to add one or more rows to a database table. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "INSERT INTO dev.dog (id, dog_name) VALUE (22, 'Simon')" +} +``` + +### Response: 200 +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [ + 22 + ], + "skipped_hashes": [] +} +``` +--- + +## Update +Executes the provided SQL statement. The UPDATE statement is used to change the values of specified attributes in one or more rows in a database table. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body +```json +{ + "operation": "sql", + "sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1" +} +``` + +### Response: 200 +```json +{ + "message": "updated 1 of 1 records", + "update_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Delete +Executes the provided SQL statement. The DELETE statement is used to remove one or more rows of data from a database table. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body +```json +{ + "operation": "sql", + "sql": "DELETE FROM dev.dog WHERE id = 1" +} +``` + +### Response: 200 +```json +{ + "message": "1 of 1 record successfully deleted", + "deleted_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` diff --git a/site/versioned_docs/version-4.3/developers/operations-api/token-authentication.md b/site/versioned_docs/version-4.3/developers/operations-api/token-authentication.md new file mode 100644 index 00000000..161c69b5 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/token-authentication.md @@ -0,0 +1,54 @@ +--- +title: Token Authentication +--- + +# Token Authentication + +## Create Authentication Tokens +Creates the tokens needed for authentication: operation & refresh token. 
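+ +Once issued, the `operation_token` is passed as a Bearer token in the `Authorization` header of subsequent requests, in place of basic auth (token shortened here for readability): + +```http +POST http://my-harperdb-server:9925/ +Authorization: Bearer eyJhbGciOiJSUzI1NiIs... +Content-Type: application/json + +{ + "operation": "user_info" +} +``` +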
+ +_Note - this operation does not require authorization to be set_ + +* operation _(required)_ - must always be `create_authentication_tokens` +* username _(required)_ - username of user to generate tokens for +* password _(required)_ - password of user to generate tokens for + +### Body +```json +{ + "operation": "create_authentication_tokens", + "username": "", + "password": "" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" +} +``` + +--- + +## Refresh Operation Token +This operation creates a new operation token. 
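+ +A typical client stores the refresh token and exchanges it for a fresh operation token whenever the old one expires, rather than re-sending credentials. A shell sketch of that exchange (assuming `curl` and `jq` are available and the default port): + +```bash +# exchange the stored refresh token for a new operation token +NEW_TOKEN=$(curl -s http://localhost:9925 \ + -H 'Content-Type: application/json' \ + -d "{\"operation\": \"refresh_operation_token\", \"refresh_token\": \"$REFRESH_TOKEN\"}" \ + | jq -r '.operation_token') +``` +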
+ +* operation _(required)_ - must always be `refresh_operation_token` +* refresh_token _(required)_ - the refresh token that was provided when tokens were created + +### Body +```json +{ + "operation": "refresh_operation_token", + "refresh_token": "EXISTING_REFRESH_TOKEN" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-sEUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg" +} +``` diff --git a/site/versioned_docs/version-4.3/developers/operations-api/users-and-roles.md b/site/versioned_docs/version-4.3/developers/operations-api/users-and-roles.md new file mode 100644 index 00000000..f5f35d56 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/users-and-roles.md @@ -0,0 +1,484 @@ +--- +title: Users and Roles +--- + +# Users and Roles + +## List Roles +Returns a list of all roles. 
[Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `list_roles` + +### Body +```json +{ + "operation": "list_roles" +} +``` + +### Response: 200 +```json +[ + { + "__createdtime__": 1611615061106, + "__updatedtime__": 1611615061106, + "id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890", + "permission": { + "super_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": true, + "insert": true, + "update": true + } + ] + } + } + } + }, + "role": "developer" + }, + { + "__createdtime__": 1610749235614, + "__updatedtime__": 1610749235614, + "id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564", + "permission": { + "cluster_user": true + }, + "role": "cluster_user" + }, + { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + } +] +``` + +--- + +## Add Role +Creates a new role with the specified permissions. [Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_role` +* role _(required)_ - name of role you are defining +* permission _(required)_ - object defining permissions for users associated with this role: + * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. + * structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true. + +### Body +```json +{ + "operation": "add_role", + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": true, + "insert": true, + "update": true + } + ] + } + } + } + } +} +``` + +### Response: 200 +```json +{ + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": true, + "insert": true, + "update": true + } + ] + } + } + } + }, + "id": "0a9368b0-bd81-482f-9f5a-8722e3582f96", + "__updatedtime__": 1598549532897, + "__createdtime__": 1598549532897 +} +``` + +--- + +## Alter Role +Modifies an existing role with the specified permissions. updates permissions from an existing role. 
[Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `alter_role` +* id _(required)_ - the id value for the role you are altering +* role _(optional)_ - name value to update on the role you are altering +* permission _(required)_ - object defining permissions for users associated with this role: + * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. + * structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true. + +### Body + +```json +{ + "operation": "alter_role", + "id": "f92162e2-cd17-450c-aae0-372a76859038", + "role": "another_developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + } +} +``` + +### Response: 200 +```json +{ + "id": "a7cb91e9-32e4-4dbf-a327-fab4fa9191ea", + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + }, + "__updatedtime__": 1598549996106 +} +``` + +--- + +## Drop Role +Deletes an existing role from the database. NOTE: Role with associated users cannot be dropped. [Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - this must always be `drop_role` +* id _(required)_ - this is the id of the role you are dropping + +### Body +```json +{ + "operation": "drop_role", + "id": "developer" +} +``` + +### Response: 200 +```json +{ + "message": "developer successfully deleted" +} +``` + +--- + +## List Users +Returns a list of all users. 
[Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `list_users` + +### Body +```json +{ + "operation": "list_users" +} +``` + +### Response: 200 +```json +[ + { + "__createdtime__": 1635520961165, + "__updatedtime__": 1635520961165, + "active": true, + "role": { + "__createdtime__": 1635520961161, + "__updatedtime__": 1635520961161, + "id": "7c78ef13-c1f3-4063-8ea3-725127a78279", + "permission": { + "super_user": true, + "system": { + "tables": { + "hdb_table": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_attribute": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_schema": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_user": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_role": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_job": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_license": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_info": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_nodes": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_temp": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + } + } + } + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" + } +] +``` + +--- + +## User Info +Returns user data for the associated user credentials. + +* operation _(required)_ - must always be `user_info` + +### Body +```json +{ + "operation": "user_info" +} +``` + +### Response: 200 +```json +{ + "__createdtime__": 1610749235611, + "__updatedtime__": 1610749235611, + "active": true, + "role": { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" +} +``` + +--- + +## Add User +Creates a new user with the specified role and credentials. [Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_user` +* role _(required)_ - 'role' name value of the role you wish to assign to the user. See `add_role` for more detail +* username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash +* password _(required)_ - clear text for password. HarperDB will encrypt the password upon receipt +* active _(required)_ - boolean value for status of user's access to your HarperDB instance. If set to false, user will not be able to access your instance of HarperDB. + +### Body +```json +{ + "operation": "add_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 +```json +{ + "message": "hdb_user successfully added" +} +``` + +--- + +## Alter User +Modifies an existing user's role and/or credentials. 
[Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super\_user roles only_ + + * operation _(required)_ - must always be `alter_user` + * username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash. + * password _(optional)_ - clear text for password. HarperDB will encrypt the password upon receipt + * role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail + * active _(optional)_ - status of user's access to your HarperDB instance. See `add_role` for more detail + +### Body +```json +{ + "operation": "alter_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 +```json +{ + "message": "updated 1 of 1 records", + "new_attributes": [], + "txn_time": 1611615114397.988, + "update_hashes": [ + "hdb_user" + ], + "skipped_hashes": [] +} +``` + +--- + +## Drop User +Deletes an existing user by username. [Learn more about HarperDB roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `drop_user` +* username _(required)_ - username assigned to the user + +### Body +```json +{ + "operation": "drop_user", + "username": "sgoldberg" +} +``` + +### Response: 200 +```json +{ + "message": "sgoldberg successfully deleted" +} +``` diff --git a/site/versioned_docs/version-4.3/developers/operations-api/utilities.md b/site/versioned_docs/version-4.3/developers/operations-api/utilities.md new file mode 100644 index 00000000..7ba696ae --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/operations-api/utilities.md @@ -0,0 +1,359 @@ +--- +title: Utilities +--- + +# Utilities + +## Restart +Restarts the HarperDB instance. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `restart` + +### Body +```json +{ + "operation": "restart" +} +``` + +### Response: 200 +```json +{ + "message": "Restarting HarperDB. This may take up to 60 seconds." +} +``` +--- + +## Restart Service +Restarts servers for the specified HarperDB service. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `restart_service` +* service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering` + +### Body +```json +{ + "operation": "restart_service", + "service": "http_workers" +} +``` + +### Response: 200 +```json +{ + "message": "Restarting http_workers" +} +``` + +--- +## System Information +Returns detailed metrics on the host system. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `system_information` +* attributes _(optional)_ - string array of top level attributes desired in the response, if no value is supplied all attributes will be returned. Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'replication'] + +### Body +```json +{ + "operation": "system_information" +} +``` + +--- + +## Delete Records Before + +Delete data before the specified timestamp on the specified database table exclusively on the node where it is executed. Any clustered nodes with replicated data will retain that data. 
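+ +The `date` value is an ISO 8601-style string (see the format note in the parameter list below). A quick way to compute a cutoff such as "everything older than 30 days" (a sketch assuming GNU `date`): + +```bash +# UTC cutoff timestamp 30 days in the past +date -u -d '30 days ago' +%Y-%m-%dT%H:%M:%S.0Z +``` +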
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `delete_records_before` +* date _(required)_ - records older than this date will be deleted. The supported format is `YYYY-MM-DDThh:mm:ss.sZ` +* schema _(required)_ - name of the schema where you are deleting your data +* table _(required)_ - name of the table where you are deleting your data + +### Body +```json +{ + "operation": "delete_records_before", + "date": "2021-01-25T23:05:27.464", + "schema": "dev", + "table": "breed" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373", + "job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373" +} +``` + +--- + +## Export Local +Exports data based on a given search operation to a local file in JSON or CSV format. + +* operation _(required)_ - must always be `export_local` +* format _(required)_ - the format in which to export the data; options are `json` and `csv` +* path _(required)_ - path, local to the server, where the data will be exported +* search_operation _(required)_ - search_operation of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` +* filename _(optional)_ - the name of the file your export will be written to (do not include an extension in the filename). If one is not provided, it will be autogenerated based on the epoch. + +### Body +```json +{ + "operation": "export_local", + "format": "json", + "path": "/data/", + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.breed" + } +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5" +} +``` + +--- + +## Export To S3 +Exports data based on a given search operation from a table to AWS S3 in JSON or CSV format. + +* operation _(required)_ - must always be `export_to_s3` +* format _(required)_ - the format in which to export the data; options are `json` and `csv` +* s3 _(required)_ - details your access keys, bucket, bucket region and key for saving the data to S3 +* search_operation _(required)_ - search_operation of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` + +### Body +```json +{ + "operation": "export_to_s3", + "format": "json", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + }, + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.dog" + } +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a", + "job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a" +} +``` + +--- + +## Install Node Modules +Executes npm install against specified custom function projects. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `install_node_modules` +* projects _(required)_ - must be an array of custom function projects. +* dry_run _(optional)_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false. + +### Body +```json +{ + "operation": "install_node_modules", + "projects": [ + "dogs", + "cats" + ], + "dry_run": true +} +``` + +--- + +## Set Configuration + +Modifies the HarperDB configuration file parameters. Must be followed by a `restart` or `restart_service` operation for changes to take effect.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_configuration` +* logging_level _(example/optional)_ - one or more configuration keywords to be updated in the HarperDB configuration file +* clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the HarperDB configuration file + +### Body +```json +{ + "operation": "set_configuration", + "logging_level": "trace", + "clustering_enabled": true +} +``` + +### Response: 200 +```json +{ + "message": "Configuration successfully set. You must restart HarperDB for new config settings to take effect." +} +``` + +--- + +## Get Configuration +Returns the HarperDB configuration parameters. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `get_configuration` + +### Body +```json +{ + "operation": "get_configuration" +} +``` + +### Response: 200 +```json +{ + "http": { + "compressionThreshold": 1200, + "cors": false, + "corsAccessList": [ + null + ], + "keepAliveTimeout": 30000, + "port": 9926, + "securePort": null, + "timeout": 120000 + }, + "threads": 11, + "authentication": { + "cacheTTL": 30000, + "enableSessions": true, + "operationTokenTimeout": "1d", + "refreshTokenTimeout": "30d" + }, + "analytics": { + "aggregatePeriod": 60 + }, + "clustering": { + "enabled": true, + "hubServer": { + "cluster": { + "name": "harperdb", + "network": { + "port": 12345, + "routes": null + } + }, + "leafNodes": { + "network": { + "port": 9931 + } + }, + "network": { + "port": 9930 + } + }, + "leafServer": { + "network": { + "port": 9940, + "routes": null + }, + "streams": { + "maxAge": null, + "maxBytes": null, + "maxMsgs": null, + "path": "/Users/hdb/clustering/leaf" + } + }, + "logLevel": "info", + "nodeName": "node1", + "republishMessages": false, + "databaseLevel": false, + "tls": { + "certificate": "/Users/hdb/keys/certificate.pem", + "certificateAuthority": "/Users/hdb/keys/ca.pem", + "privateKey": "/Users/hdb/keys/privateKey.pem", + "insecure": true, + "verify": true + }, + "user": "cluster_user" + }, + "componentsRoot": "/Users/hdb/components", + "localStudio": { + "enabled": false + }, + "logging": { + "auditAuthEvents": { + "logFailed": false, + "logSuccessful": false + }, + "auditLog": true, + "auditRetention": "3d", + "file": true, + "level": "error", + "root": "/Users/hdb/log", + "rotation": { + "enabled": false, + "compress": false, + "interval": null, + "maxSize": null, + "path": "/Users/hdb/log" + }, + "stdStreams": false + }, + "mqtt": { + "network": { + "port": 1883, + "securePort": 8883 + }, + "webSocket": true, + "requireAuthentication": true + }, + "operationsApi": { + "network": { + "cors": true, + "corsAccessList": [ + "*" + ], + "domainSocket": "/Users/hdb/operations-server", + "port": 9925, + "securePort": null + } + }, + "rootPath": "/Users/hdb", + "storage": { + "writeAsync": false, + "caching": true, + "compression": false, + "noReadAhead": true, + "path": "/Users/hdb/database", + "prefetchWrites": true + }, + "tls": { + "certificate": "/Users/hdb/keys/certificate.pem", + "certificateAuthority": "/Users/hdb/keys/ca.pem", + "privateKey": "/Users/hdb/keys/privateKey.pem" + } +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/developers/real-time.md b/site/versioned_docs/version-4.3/developers/real-time.md new file mode 100644 index 00000000..dd2d88c9 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/real-time.md @@ -0,0 +1,160 @@ +--- +title: Real-Time +--- 
+ +# Real-Time + +HarperDB provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time, as well as to handle data-oriented messaging. HarperDB supports multiple standardized protocols to facilitate diverse standards-based client interaction. + +HarperDB real-time communication is based around database tables. Declared tables are the basis for monitoring data, and for defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a HarperDB application folder](./applications/): +```graphql +type MyTopic @table @export +``` +You can then subscribe to records or sub-topics in this topic/namespace, as well as save data and publish messages, with the protocols discussed below. + +### Content Negotiation + +HarperDB is a database, not a generic broker, and is therefore highly adept at handling _structured_ data. Data can be published and subscribed to in all supported structured/object formats, including JSON, CBOR, and MessagePack, and the data will be stored and handled as structured data. This means that different clients can individually choose which format they prefer, both for inbound and outbound messages. One client could publish in JSON, and another client could choose to receive messages in CBOR. + +## Protocols + +### MQTT + +HarperDB supports MQTT as an interface to this real-time data delivery. It is important to note that MQTT in HarperDB is not just a generic pub/sub hub, but is deeply integrated with the database, providing subscriptions directly to database records and publishing to these records. In this document we will explain how MQTT pub/sub concepts are aligned and integrated with database functionality. + +#### Configuration + +HarperDB supports MQTT with its `mqtt` server module, over standard TCP sockets or over WebSockets. This is enabled by default, but can be configured in your `harperdb-config.yaml` configuration, allowing you to change which ports it listens on, whether secure TLS connections are used, and whether MQTT is accepted over WebSockets: + +```yaml +mqtt: + network: + port: 1883 + securePort: 8883 # for TLS + webSocket: true # will also enable WS support through the default HTTP interface/port + mTLS: false + requireAuthentication: true +``` + +Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`. mTLS is also supported by enabling it in the configuration and using the certificate authority from the TLS section of the configuration. See the [configuration documentation for more information](../deployments/configuration). + +#### Capabilities + +HarperDB's MQTT capabilities include support for MQTT v3.1 and v5, with standard publish and subscribe functionality, multi-level topics, QoS levels 0 and 1, and durable (non-clean) sessions. HarperDB accepts QoS 2 interactions, but doesn't guarantee exactly-once delivery (although any guarantee of exactly-once delivery over unstable networks is a fictional aspiration). HarperDB doesn't currently support last will, nor single-level wildcards (only multi-level wildcards). + +### Topics + +In MQTT, messages are published to, and subscribed from, topics. In HarperDB, topics are aligned with resource endpoint paths in exactly the same way as REST endpoints.
If you define a table or resource in your schema with a path/endpoint of "my-resource", it can be addressed as a topic just like a URL path. So a topic of "my-resource/some-id" would correspond to the record in the my-resource table (or custom resource) with a record id of "some-id". + +This means that you can subscribe to "my-resource/some-id", and this subscription will receive notification messages for any updates to this record. If this record is modified or deleted, a message will be sent to listeners of this subscription. + +The current value of this record is also treated as the "retained" message for this topic. When you subscribe to "my-resource/some-id", you will immediately receive the record for this id, through a "publish" command from the server, as the initial "retained" message that is first delivered. This provides a simple and effective way to get the current state of a record and future updates to that record, without having to worry about timing issues of aligning a retrieval and a subscription separately. + +Similarly, publishing a message to a "topic" also interacts with the database. Publishing a message with the "retain" flag enabled is interpreted as an update or put to that record. The published message will replace the current record with the contents of the published message. + +If a message is published without a `retain` flag, the message will not alter the record at all, but will still be published to any subscribers to that record. + +HarperDB supports QoS 0 and 1 for publishing and subscribing. + +HarperDB supports multi-level topics, both for subscribing and publishing. HarperDB also supports multi-level wildcards, so you can subscribe to `my-resource/#` to receive notifications for `my-resource/some-id` as well as `my-resource/nested/id`, or you can subscribe to `my-resource/nested/#` and receive the latter, but not the former, topic messages. HarperDB currently only supports trailing multi-level wildcards (no single-level wildcards with '+'). + +### Ordering + +HarperDB is designed to be a distributed database, and an intrinsic characteristic of distributed servers is that messages may take different amounts of time to traverse the network and may arrive in a different order depending on server location and network topology. HarperDB is designed for distributed data with minimal latency, so messages are delivered to subscribers immediately when they arrive. HarperDB does not delay messages to coordinate confirmation or consensus among other nodes, which would significantly increase latency; messages are delivered as quickly as possible. + +As an example, let's consider message #1, which is published to node A; node A then sends the message to node B and node C, but the message takes a while to get there. Slightly later, while the first message is still in transit, message #2 is published to node B, which then replicates it to A and C, and because of network conditions, message #2 arrives at node C before message #1. Because HarperDB prioritizes low latency, when node C receives message #2, it immediately publishes it to all its local subscribers (it has no knowledge that message #1 is in transit). + +When message #1 is received by node C, what it does with this message depends on whether the message is a "retained" message (published with the retain flag set to true, or put/updated/upserted/inserted into the database) or a non-retained message.
In the case of a non-retained message, this message will be delivered to all local subscribers (even though it had been published earlier), thereby prioritizing the delivery of every message. On the other hand, a retained message will not deliver the earlier out-of-order message to clients; HarperDB will keep the message with the latest timestamp as the "winning" record state (and it will be the retained message for any subsequent subscriptions). Retained messages maintain (eventual) consistency across the entire cluster of servers; all nodes will converge on the same message as being the latest, retained message (#2 in this case). + +Non-retained messages are generally a good choice for applications like chat, where every message needs to be delivered even if some might arrive out-of-order (the order may not be consistent across all servers). Retained messages can be thought of as "superseding" messages, and are a good fit for applications like instrument measurements (temperature readings, for example), where the priority is to provide the _latest_ temperature, older readings are not important to publish once a newer reading exists, and consistency of the most recent record (across the network) is important. + +### WebSockets + +WebSockets are supported through the REST interface and go through the `connect(incomingMessages)` method on resources. By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection with `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' with the resource id of 341 and connect to it. On the web platform this could be: + +```javascript +let ws = new WebSocket('wss://server/my-resource/341'); +ws.onmessage = (event) => { + // received a notification from the server + let data = JSON.parse(event.data); +}; +``` + +By default, the resource will make a subscription to that resource, monitoring any changes to the records or messages published to it, and will return events on the WebSockets connection. You can also override `connect(incomingMessages)` with your own handler. The `connect` method simply needs to return an iterable (asynchronous iterable) that represents the stream of messages to be sent to the client. One easy way to create an iterable stream is to define the `connect` method as a generator and `yield` messages as they become available. For example, a simple WebSockets echo server for a resource could be written: + +```javascript +export class Echo extends Resource { + async *connect(incomingMessages) { + for await (let message of incomingMessages) { // wait for each incoming message from the client + // and send the message back to the client + yield message; + } + } +} +``` + +You can also call the default `connect` and it will provide a convenient streaming iterable with events for the outgoing messages, with a `send` method that you can call to send messages on the iterable, and a `close` event for determining when the connection is closed.
The incoming messages iterable is also an event emitter, and you can listen for `data` events to get the incoming messages using event style: + +```javascript +export class Example extends Resource { + connect(incomingMessages) { + let outgoingMessages = super.connect(); + let timer = setInterval(() => { + outgoingMessages.send({greeting: 'hi again!'}); + }, 1000); // send a message once a second + incomingMessages.on('data', (message) => { + // another way of echoing the data back to the client + outgoingMessages.send(message); + }); + outgoingMessages.on('close', () => { + // make sure we end the timer once the connection is closed + clearInterval(timer); + }); + return outgoingMessages; + } +} +``` + +### Server Sent Events + +Server Sent Events (SSE) are also supported through the REST server interface, and provide a simple and efficient mechanism for web-based applications to receive real-time updates. For consistency of push delivery, SSE connections go through the `connect()` method on resources, much like WebSockets. The primary difference is that `connect` is called without any `incomingMessages` argument, since SSE is a one-directional transport mechanism. This can be used much like WebSockets: specifying a resource URL path will connect to that resource, and by default provides a stream of messages for changes and messages for that resource. For example, you can connect to receive notifications in a browser for a resource like: + +```javascript +let eventSource = new EventSource('https://server/my-resource/341', { withCredentials: true }); +eventSource.onmessage = (event) => { + // received a notification from the server + let data = JSON.parse(event.data); +}; +``` + +### MQTT Feature Support Matrix + +| Feature | Support | +|---------|---------| +| Connections, protocol negotiation, and acknowledgement with v3.1.1 | :heavy_check_mark: | +| Connections, protocol negotiation, and acknowledgement with v5 | :heavy_check_mark: | +| Secure MQTTS | :heavy_check_mark: | +| MQTTS over WebSockets | :heavy_check_mark: | +| MQTT authentication via user/pass | :heavy_check_mark: | +| MQTT authentication via mTLS | :heavy_check_mark: | +| Publish | :heavy_check_mark: | +| Subscribe | :heavy_check_mark: | +| Multi-level wildcard | :heavy_check_mark: | +| Single-level wildcard | :heavy_check_mark: | +| QoS 0 | :heavy_check_mark: | +| QoS 1 | :heavy_check_mark: | +| QoS 2 | Not fully supported; can perform the conversation but does not guarantee exactly-once delivery | +| Keep-Alive monitoring | | +| Clean session | :heavy_check_mark: | +| Durable session | :heavy_check_mark: | +| Distributed durable session | | +| Will | :heavy_check_mark: | +| MQTT V5 User properties | | +| MQTT V5 Will properties | | +| MQTT V5 Connection properties | | +| MQTT V5 Connection acknowledgement properties | | +| MQTT V5 Publish properties | | +| MQTT V5 Subscribe properties | | +| MQTT V5 Ack properties | | +| MQTT V5 AUTH command | | +| MQTT V5 Shared Subscriptions | | \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/developers/rest.md b/site/versioned_docs/version-4.3/developers/rest.md new file mode 100644 index 00000000..90348887 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/rest.md @@ -0,0 +1,357 @@ +--- +title: REST +--- + +# REST + +HarperDB provides a powerful, efficient, and standard-compliant HTTP REST interface for interacting with tables and other resources.
The REST interface is the recommended interface for data access, querying, and manipulation (for HTTP interactions), providing the best performance and HTTP interoperability with different clients. + +Resources, including tables, can be configured as RESTful endpoints. Make sure you review the [application introduction](./applications/) and [defining schemas](./applications/defining-schemas) to properly define your schemas and select which tables are exported and available through the REST interface, as tables are not exported by default. The name of the [exported](./applications/defining-schemas#export) resource defines the basis of the endpoint path available at the application HTTP server port [configured here](../deployments/configuration#http) (the default being `9926`). From there, a record id or query can be appended. Following uniform interface principles, HTTP methods define different actions with resources. For each method, this describes the default action. + +The default path structure provides access to resources at several levels: + +* `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table). +* `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records. +* `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records. +* `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix. +* `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments. + +## GET + +These can be used to retrieve individual records or perform searches. This is handled by the Resource method `get()` (and can be overridden). + +### `GET /my-resource/<record-id>` + +This can be used to retrieve a record by its primary key. The response will include the record as the body. + +#### Caching/Conditional Requests + +A `GET` response for a record will include an encoded version (a timestamp of the last modification) of this record in the `ETag` response header (or of any accessed record when used in a custom get method). On subsequent requests, a client (that has a cached copy) may include an `If-None-Match` request header with this tag. If the record has not been updated since that version, the response will have a 304 status and no body. This facilitates significant performance gains since the response data doesn't need to be serialized and transferred over the network. + +### `GET /my-resource/?property=value` + +This can be used to search for records by the specified property name and value. See the querying section for more information. + +### `GET /my-resource/<record-id>.property` + +This can be used to retrieve the specified property of the specified record. + +## PUT + +This can be used to create or update a record with the provided object/data (similar to an "upsert") with a specified key. This is handled by the Resource method `put(record)`. + +### `PUT /my-resource/<record-id>` + +This will create or update the record with the URL path that maps to the record's primary key. The record will be replaced with the contents of the data in the request body.
The new record will exactly match the data that was sent (this will remove any properties that were present in the previous record and not included in the body). Future GETs will return the exact data that was provided by PUT (what you PUT is what you GET). For example: + +```http +PUT /MyTable/123 +Content-Type: application/json + +{ "name": "some data" } +``` + +This will create or replace the record with a primary key of "123" with the object defined by the JSON in the body. This is handled by the Resource method `put()`. + +## DELETE + +This can be used to delete a record or records. + +### `DELETE /my-resource/<record-id>` + +This will delete a record with the given primary key. This is handled by the Resource's `delete` method. For example: + +```http +DELETE /MyTable/123 +``` + +This will delete the record with the primary key of "123". + +### `DELETE /my-resource/?property=value` + +This will delete all the records that match the provided query. + +## POST + +Generally the POST method can be used for custom actions, since POST has the broadest semantics. For tables that are exposed as endpoints, this can also be used to create new records. + +### `POST /my-resource/` + +This is handled by the Resource method `post(data)`, which is a good method to extend to make various other types of modifications. Also, with a table you can create a new record without specifying a primary key, for example: + +```http +POST /MyTable/ +Content-Type: application/json + +{ "name": "some data" } +``` + +This will create a new record, auto-assigning a primary key, which will be returned in the `Location` header. + +## Querying through URL query parameters + +URL query parameters provide a powerful language for specifying database queries in HarperDB. This can be used to search by a single attribute name and value, to find all records with the given value for the specified property/attribute. It is important to note that an attribute must be configured to be indexed in order to search on it. For example: + +```http +GET /my-resource/?property=value +``` + +We can specify multiple properties that must match: + +```http +GET /my-resource/?property=value&property2=another-value +``` + +Note that only one of the attributes needs to be indexed for this query to execute. + +We can also specify different comparators such as less than and greater than queries using [FIQL](https://datatracker.ietf.org/doc/html/draft-nottingham-atompub-fiql-00) syntax. If we want to specify records with an `age` value greater than 20: + +```http +GET /my-resource/?age=gt=20 +``` + +Or less than or equal to 20: + +```http +GET /my-resource/?age=le=20 +``` + +The comparison operators include the standard FIQL operators `lt` (less than), `le` (less than or equal), `gt` (greater than), `ge` (greater than or equal), and `ne` (not equal). These comparison operators can also be combined with other query parameters with `&`. For example, if we wanted products with a category of software and price between 100 and 200, we could write: + +```http +GET /Product/?category=software&price=gt=100&price=lt=200 +``` + +Comparison operators can also be used on Date fields; however, we have to ensure that the date format is properly escaped.
For example, if we are looking for a listing date greater than `2017-03-08T09:30:00.000Z`, we must escape the colons as `%3A`: + +```http +GET /Product/?listDate=gt=2017-03-08T09%3A30%3A00.000Z +``` + +You can also search for attributes that start with a specific string, by using the `==` comparator and appending a `*` to the attribute value: + +```http +GET /Product/?name==Keyboard* +``` + +Note that some HTTP clients may be overly aggressive in encoding query parameters, and you may need to disable extra encoding of query parameters to ensure operators are passed through without manipulation. + +Here is a full list of the supported FIQL-style operators/comparators: +* `==`: equal +* `=lt=`: less than +* `=le=`: less than or equal +* `=gt=`: greater than +* `=ge=`: greater than or equal +* `=ne=`, `!=`: not equal +* `=ct=`: contains the value (for strings) +* `=sw=`, `==*`: starts with the value (for strings) +* `=ew=`: ends with the value (for strings) +* `=`, `===`: strict equality (no type conversion) +* `!==`: strict inequality (no type conversion) + +### Unions +Conditions can also be applied with `OR` logic, returning the union of records that match either condition. This can be specified by using the `|` operator instead of `&`. For example, to return any product with a rating of `5` _or_ a `featured` attribute that is `true`, we could write: +```http +GET /Product/?rating=5|featured=true +``` + +### Grouping of Operators +Multiple conditions with different operators can be combined with grouping of conditions to indicate the order of operation. Grouping conditions can be done with parentheses, with standard grouping conventions as used in query and mathematical expressions. For example, a query to find products with a rating of 5 OR a price between 100 and 200 could be written: +```http +GET /Product/?rating=5|(price=gt=100&price=lt=200) +``` +Grouping conditions can also be done with square brackets, which function the same as parentheses for grouping conditions. The advantage of using square brackets is that you can include user-provided values that might have parentheses in them, and use standard URI component encoding functionality, which will safely escape/encode square brackets, but not parentheses. For example, if we were constructing a query for products with a rating of 5 and matching one of a set of user-provided tags, a query could be built like: +```http +GET /Product/?rating=5&[tag=fast|tag=scalable|tag=efficient] +``` +And the tags could be safely generated from user inputs in a tag array like: +```javascript +let url = `/Product/?rating=5&[${tags.map(encodeURIComponent).join('|')}]` +``` +More complex queries can be created by further nesting groups: +```http +GET /Product/?price=lt=100|[rating=5&[tag=fast|tag=scalable|tag=efficient]&inStock=true] +``` + +## Query Calls + +HarperDB has several special query functions that use "call" syntax. These can be included in the query string as their own query entries (separated from other query conditions with an `&`). These include: + +### `select(properties)` + +This function allows you to specify which properties should be included in the responses. This takes several forms: + +* `?select(property)`: This will return the values of the specified property directly in the response (they will not be put in an object). +* `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties.
+* `?select([property1,property2,...])`: This returns the records as arrays of the property values for the specified properties. +* `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property. +* `?select(property{subProperty1,subProperty2{subSubProperty,..}},...)`: This can be used to specify which sub-properties should be included in nested objects and joined/referenced records. + +To get a list of product names with a category of software: + +```http +GET /Product/?category=software&select(name) +``` + +### `limit(start,end)` or `limit(end)` + +This function specifies a limit on the number of records returned, optionally providing a starting offset. + +For example, to find the first twenty records with a `rating` greater than 3 and `inStock` equal to true, returning only the `rating` and `name` properties, you could use: + +```http +GET /Product/?rating=gt=3&inStock=true&select(rating,name)&limit(20) +``` + +### `sort(property)`, `sort(+property,-property,...)` + +This function allows you to indicate the sort order for the returned results. The argument for `sort()` is one or more properties that should be used to sort. If the property is prefixed with '+', or has no prefix, the sort will be performed in ascending order by the indicated attribute/property. If the property is prefixed with '-', it will be sorted in descending order. If multiple properties are specified, the sort will be performed on the first property, and for records with the same value for that property, the next property will be used to break the tie and sort results. This tie breaking will continue through any provided properties. + +For example, to sort by product name (in ascending order): +```http +GET /Product?rating=gt=3&sort(+name) +``` +To sort by rating in ascending order, then by price in descending order for products with the same rating: +```http +GET /Product?sort(+rating,-price) +``` + +# Relationships +HarperDB supports relationships in its data models, allowing tables to define a relationship with data from other tables (or even themselves) through foreign keys. These relationships can be one-to-many, many-to-one, or many-to-many (and even with ordered relationships). These relationships are defined in the schema, and can then easily be queried through chained attributes that act as "join" queries, allowing related attributes to be referenced in conditions and selected for returned results. + +## Chained Attributes and Joins +To support relationships and hierarchical data structures, in addition to querying on top-level attributes, you can also query on chained attributes. Most importantly, this provides HarperDB's "join" functionality, allowing related tables to be queried and joined in the results. Chained properties are specified by using dot syntax. In order to effectively leverage join functionality, you need to define a relationship in your schema: +```graphql +type Product @table @export { + id: ID @primaryKey + name: String + brandId: ID @indexed + brand: Brand @relationship(from: "brandId") +} +type Brand @table @export { + id: ID @primaryKey + name: String + products: [Product] @relationship(to: "brandId") +} +``` +And then you could query a product by brand name: +```http +GET /Product/?brand.name=Microsoft +``` +This will query for products for which the `brandId` references a `Brand` record with a `name` of `"Microsoft"`.
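+ +For illustration, a response to the brand-name query above might look like the following (these records are hypothetical, and note that the matching `Brand` record itself is not included by default, as explained below): + +```json +[ + { "id": "p1", "name": "Keyboard", "brandId": "b1" }, + { "id": "p2", "name": "Mouse", "brandId": "b1" } +] +```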
+ +The `brand` attribute in `Product` is a "computed" attribute from the foreign key (`brandId`), for the many-to-one relationship to the `Brand`. In the schema above, we also defined the reverse one-to-many relationship from a `Brand` to a `Product`, and we could likewise query that: +```http +GET /Brand/?products.name=Keyboard +``` +This would return any `Brand` with at least one product with the name `"Keyboard"`. Note that both of these queries are effectively acting as an "INNER JOIN". + +### Chained/Nested Select +Computed relationship attributes are not included by default in query results. However, we can include them by specifying them in a select: +```http +GET /Product/?brand.name=Microsoft&select(name,brand) +``` +We can also do a "nested" select and specify which sub-attributes to include. For example, if we only wanted to include the name property from the brand, we could do so: +```http +GET /Product/?brand.name=Microsoft&select(name,brand{name}) +``` +Or to specify multiple sub-attributes, we can comma-delimit them. Note that selects can "join" to another table without any constraint/filter on the related/joined table: +```http +GET /Product/?name=Keyboard&select(name,brand{name,id}) +``` +When selecting properties from a related table without any constraints on the related table, this effectively acts like a "LEFT JOIN" and will omit the `brand` property if the `brandId` is `null` or references a non-existent brand. + + +### Many-to-many Relationships (Array of Foreign Keys) +Many-to-many relationships are also supported, and can easily be created using an array of foreign key values, without requiring the traditional use of a junction table. This can be done by simply creating a relationship on an array-typed property that references a local array of foreign keys. For example, we could create a relationship to the resellers of a product (each product can have multiple resellers, and each reseller can sell many products): + +```graphql +type Product @table @export { + id: ID @primaryKey + name: String + resellerIds: [ID] @indexed + resellers: [Reseller] @relationship(from: "resellerIds") +} +type Reseller @table { + id: ID @primaryKey + name: String + ... +} +``` +The product record can then hold an array of the reseller ids. When the `resellers` property is accessed (either through code or through selects and conditions), the array of ids is resolved to an array of reseller records. We can also query through the `resellers` relationship just like the other relationships. For example, to query the products that are available through the "Cool Shop": +```http +GET /Product/?resellers.name=Cool Shop&select(id,name,resellers{name,id}) +``` +One of the benefits of using an array of foreign key values is that this array can be manipulated using standard array methods (in JavaScript), and the array dictates an order for the keys and therefore for the resulting records. For example, you may wish to define a specific order for the resellers and how they are listed (which comes first and last): +```http +PUT /Product/123 +Content-Type: application/json + +{ "id": "123", "resellerIds": ["first-reseller-id", "second-reseller-id", "last-reseller-id"], +...} +``` + +### Type Conversion +Query parameters are simply text, so there are several features for converting parameter values to properly typed values in order to perform correct searches.
For the FIQL comparators, which include `==`, `!=`, `=gt=`, `=lt=`, `=ge=`, and `=le=`, the parser will perform type conversion according to the following rules: +* `name==null`: Will convert the value to `null` for searching. +* `name==123`: Will convert the value to a number _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`). +* `name==true`: Will convert the value to a boolean _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`). +* `name==number:123`: Will explicitly convert the value after "number:" to a number. +* `name==boolean:true`: Will explicitly convert the value after "boolean:" to a boolean. +* `name==string:some%20text`: Will explicitly keep the value after "string:" as a string (and perform URL component decoding) +* `name==date:2024-01-05T20%3A07%3A27.955Z`: Will explicitly convert the value after "date:" to a Date object. + +If the attribute specifies a type (like `Float`) in the schema definition, the value will always be converted to the specified type before searching. + +For "strict" operators, which include `=`, `===`, and `!==`, no automatic type conversion will be applied; the value will be decoded as a string with URL component decoding, unless the attribute specifies a type, in which case the attribute type will dictate the type conversion. + +### Content Types and Negotiation + +HTTP defines a couple of headers for indicating the (preferred) content type of the request and response. The `Content-Type` request header can be used to specify the content type of the request body (for PUT, PATCH, and POST). The `Accept` request header indicates the preferred content type of the response. For general records with object structures, HarperDB supports the following content types: +* `application/json` - Common format, easy to read, with great tooling support. +* `application/cbor` - Recommended binary format for optimal encoding efficiency and performance. +* `application/x-msgpack` - Also an efficient format, but CBOR is preferable, as it has better streaming capabilities and faster time-to-first-byte. +* `text/csv` - CSV lacks explicit typing and is not well suited for heterogeneous data structures, but is good for moving data to and from a spreadsheet. + +CBOR is generally the most efficient and powerful encoding format, with the best performance, most compact encoding, and most expansive ability to encode different data types like Dates, Maps, and Sets. MessagePack is very similar and tends to have broader adoption. However, JSON can be easier to work with and may have better tooling. Also, if you are using compression for data transfer (gzip or brotli), JSON will often result in more compact compressed data due to character frequencies that better align with Huffman coding, making JSON a good choice for web applications that do not require specific data types beyond the standard JSON types. + +Requesting a specific content type can also be done in a URL by suffixing the path with an extension for the content type. If you want to retrieve a record in CSV format, you could request: + +```http +GET /product/some-id.csv +``` + +Or you could request a query response in MessagePack: + +```http +GET /product/.msgpack?category=software +``` + +However, extensions in paths are generally not recommended; it is best practice to use the `Accept` header to specify acceptable content types.
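+ +As a sketch of how content negotiation can be used from a client, the following hypothetical `fetch` call requests a query response as CBOR via the `Accept` header (the server address, port, and table name are illustrative, and a CBOR library is assumed for decoding): + +```javascript +// Request query results as CBOR rather than JSON (hypothetical endpoint and table). +const response = await fetch('https://server:9926/Product/?category=software', { + headers: { Accept: 'application/cbor' }, +}); +// CBOR is a binary format, so read the raw bytes and decode them with a CBOR library. +const buffer = await response.arrayBuffer(); +```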
+ +### Specific Content Objects + +You can specify other content types, and the data will be stored as a record or object that holds the type and contents of the data. For example, if you do: + +``` +PUT /my-resource/33 +Content-Type: text/calendar + +BEGIN:VCALENDAR +VERSION:2.0 +... +``` + +This would store a record equivalent to JSON: + +``` +{ "contentType": "text/calendar", "data": "BEGIN:VCALENDAR\nVERSION:2.0\n..." } +``` + +Retrieving a record with `contentType` and `data` properties will likewise return a response with the specified `Content-Type` and body. If the `Content-Type` is not of the `text` family, the data will be treated as binary data (a Node.js `Buffer`). + +You can also use `application/octet-stream` to indicate that the request body should be preserved in binary form. This is also useful for uploading to a specific property: + +``` +PUT /my-resource/33/image +Content-Type: image/gif + +...image data... +``` diff --git a/site/versioned_docs/version-4.3/developers/security/basic-auth.md b/site/versioned_docs/version-4.3/developers/security/basic-auth.md new file mode 100644 index 00000000..56367bb2 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/basic-auth.md @@ -0,0 +1,62 @@ +--- +title: Basic Authentication +--- + +# Basic Authentication + +HarperDB uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request. + +_**You do not need to log in separately. Basic Auth is added to each HTTP request like create\_database, create\_table, insert, etc. via headers.**_ + +A header is added to each HTTP request. The header key is **“Authorization”**; the header value is **“Basic <<your username and password buffer token>>”**. + +## Authentication in HarperDB Studio + +In the below code sample, you can see where we add the authorization header to the request. This needs to be added for each and every HTTP request for HarperDB. + +_Note: This function uses btoa.
Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._ + +```javascript +const http = require('http'); // Node's http module (import assumed for this sample) + +function callHarperDB(call_object, operation, callback){ + + const options = { + "method": "POST", + "hostname": call_object.endpoint_url, + "port": call_object.endpoint_port, + "path": "/", + "headers": { + "content-type": "application/json", + "authorization": "Basic " + btoa(call_object.username + ':' + call_object.password), + "cache-control": "no-cache" + + } + }; + + const http_req = http.request(options, function (hdb_res) { + let chunks = []; + + hdb_res.on("data", function (chunk) { + chunks.push(chunk); + }); + + hdb_res.on("end", function () { + const body = Buffer.concat(chunks); + // isJson is assumed to be a helper, defined elsewhere, that checks whether the body parses as JSON + if (isJson(body)) { + return callback(null, JSON.parse(body)); + } else { + return callback(body, null); + + } + + }); + }); + + http_req.on("error", function (chunk) { + return callback("Failed to connect", null); + }); + + http_req.write(JSON.stringify(operation)); + http_req.end(); + +} +``` diff --git a/site/versioned_docs/version-4.3/developers/security/certificate-management.md b/site/versioned_docs/version-4.3/developers/security/certificate-management.md new file mode 100644 index 00000000..eb69df74 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/certificate-management.md @@ -0,0 +1,62 @@ +--- +title: Certificate Management +--- + +# Certificate Management + +This document covers managing certificates for HarperDB's external-facing APIs. For information on certificate management for clustering, see [clustering certificate management](../clustering/certificate-management). + +## Development + +An out-of-the-box install of HarperDB does not have HTTPS enabled (see [configuration](../../deployments/configuration) for relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS. + +To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart HarperDB. + +By default, HarperDB will generate certificates and place them at `<ROOTPATH>/keys/`. These certificates will not have a valid Common Name (CN) for your HarperDB node, so you will be able to use HTTPS, but your HTTPS client must be configured to accept the invalid certificate. + +## Production + +For production deployments, in addition to using HTTPS, we recommend using your own certificate authority (CA), or a public CA such as Let's Encrypt, to generate certificates with CNs that match the Fully Qualified Domain Name (FQDN) of your HarperDB node. + +We have a few recommended options for enabling HTTPS in a production setting. + +### Option: Enable HarperDB HTTPS and Replace Certificates + +To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart HarperDB. + +To replace the certificates, either replace the contents of the existing certificate files at `<ROOTPATH>/keys/`, or update the HarperDB configuration with the path of your new certificate files, and then restart HarperDB. + +```yaml +tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`operationsApi.tls` configuration is optional. If it is not set, HarperDB will default to the values in the `tls` section.
+ +```yaml +operationsApi: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +### Option: Nginx Reverse Proxy + +Instead of enabling HTTPS for HarperDB, Nginx can be used as a reverse proxy for HarperDB. + +Install Nginx, configure Nginx to use certificates issued from your own CA or a public CA, then configure Nginx to listen for HTTPS requests and forward them to HarperDB as HTTP requests. + +[Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let’s Encrypt certificates used by Nginx. + +### Option: External Reverse Proxy + +Instead of enabling HTTPS for HarperDB, a number of different external services can be used as a reverse proxy for HarperDB. These services typically have integrated certificate management. Configure the service to listen for HTTPS requests and forward them (over a private network) to HarperDB as HTTP requests. + +Examples of these types of services include an AWS Application Load Balancer or a GCP external HTTP(S) load balancer. + +### Additional Considerations + +It is possible to use different certificates for the Operations API and the Custom Functions API. In scenarios where only your Custom Functions endpoints need to be exposed to the Internet and the Operations API is reserved for HarperDB administration, you may want to use a private CA to issue certificates for the Operations API and a public CA for the Custom Functions API certificates. diff --git a/site/versioned_docs/version-4.3/developers/security/configuration.md b/site/versioned_docs/version-4.3/developers/security/configuration.md new file mode 100644 index 00000000..67d959fd --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/configuration.md @@ -0,0 +1,39 @@ +--- +title: Configuration +--- + +# Configuration + +HarperDB was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with HarperDB. + +## CORS + +HarperDB allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access\_control\_CORS). By default, HarperDB enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following: + +1. Open the harperdb-config.yaml file, which can be found in `<ROOTPATH>`, the location you specified during install. +1. In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: `cors` and `corsAccessList`. + * `cors` + 1. To turn off, change to: `cors: false` + 1. To turn on, change to: `cors: true` + * `corsAccessList` + 1. The `corsAccessList` will only be recognized by the system when `cors` is `true` + 1. To create an access list, set `corsAccessList` to a comma-separated list of domains. + + i.e. `corsAccessList` is `http://harperdb.io,http://products.harperdb.io` + 1. To clear out the access list and allow all domains: `corsAccessList` is `[null]` + +## SSL + +HarperDB provides the option to use an HTTP, or HTTPS and HTTP/2, interface. The default port for the server is 9925. + +This default port can be changed by updating the `operationsApi.network.port` value in `<ROOTPATH>/harperdb-config.yaml` + +By default, HTTPS is turned off and HTTP is turned on. It is recommended that you never directly expose HarperDB's HTTP interface through a publicly available port. HTTP is intended for local or private network use. + +You can toggle HTTPS and HTTP in the settings file by setting `operationsApi.network.https` to `true` or `false`.
When `https` is set to `false`, the server will use HTTP (version 1.1). Enabling HTTPS will enable both HTTP/1.1 and HTTP/2 over TLS. + +HarperDB automatically generates a certificate (certificate.pem), a certificate authority (ca.pem) and a private key file (privateKey.pem), which live at `<ROOTPATH>/keys/`. + +You can replace these with your own certificates and key. + +**Changes to these settings require a restart. Use the `restart` operation from the HarperDB Operations API.** diff --git a/site/versioned_docs/version-4.3/developers/security/index.md b/site/versioned_docs/version-4.3/developers/security/index.md new file mode 100644 index 00000000..6f3ab721 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/index.md @@ -0,0 +1,13 @@ +--- +title: Security +--- + +# Security + +HarperDB uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases. + +* [JWT Authentication](./jwt-auth) +* [Basic Authentication](./basic-auth) +* [mTLS Authentication](./mtls-auth) +* [Configuration](./configuration) +* [Users and Roles](./users-and-roles) diff --git a/site/versioned_docs/version-4.3/developers/security/jwt-auth.md b/site/versioned_docs/version-4.3/developers/security/jwt-auth.md new file mode 100644 index 00000000..f48fe0ee --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/jwt-auth.md @@ -0,0 +1,96 @@ +--- +title: JWT Authentication +--- + +# JWT Authentication + +HarperDB uses token-based authentication with JSON Web Tokens (JWTs). + +This consists of two primary operations, `create_authentication_tokens` and `refresh_operation_token`. These generate two types of tokens, as follows: + +* The `operation_token`, which is used to authenticate all HarperDB operations in the Bearer Token Authorization Header. The default expiry is one day. +* The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization Header for the `refresh_operation_token` operation only. The default expiry is thirty days. + +The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost. + +## Create Authentication Tokens + +Users must initially create tokens using their HarperDB credentials. The following POST body is sent to HarperDB. No headers are required for this POST operation.
+ +```json +{ + "operation": "create_authentication_tokens", + "username": "username", + "password": "password" +} +``` + +A full cURL example can be seen here: + +```bash +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "operation": "create_authentication_tokens", + "username": "username", + "password": "password" +}' +``` + +An example expected return object is: + +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60" +} +``` + +## Using JWT Authentication Tokens + +The `operation_token` value is used to authenticate all operations in place of our standard Basic auth.
In order to pass the token you will need to create a Bearer Token Authorization Header like the following request: + +```bash +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \ +--data-raw '{ + "operation":"search_by_hash", + "schema":"dev", + "table":"dog", + "hash_values":[1], + "get_attributes": ["*"] +}' +``` + +## Token Expiration + +`operation_token` expires at a set interval. Once it expires, it will no longer be accepted by HarperDB. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization Header. A full cURL example can be seen here: + +```bash +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \ +--data-raw '{ + "operation":"refresh_operation_token" +}' +``` + +This will return a new `operation_token`.
An example expected return object is: + +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ5NzgxODkxNTEsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDk3ODE4OTE1MSwiYWN0aXZlIjp0cnVlLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDk0NDE1MTM0NywiX191cGRhdGVkdGltZV9fIjoxNjA0OTQ0MTUxMzQ3LCJpZCI6IjdiNDNlNzM1LTkzYzctNDQzYi05NGY3LWQwMzY3Njg5NDc4YSIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6InVzZXJuYW1lIn0sImlhdCI6MTYwNDk3ODcxMywiZXhwIjoxNjA1MDY1MTEzLCJzdWIiOiJvcGVyYXRpb24ifQ.qB4FS7fzryCO5epQlFCQe4mQcUEhzXjfsXRFPgauXrGZwSeSr2o2a1tE1xjiI3qjK0r3f2bdi2xpFlDR1thdY-m0mOpHTICNOae4KdKzp7cyzRaOFurQnVYmkWjuV_Ww4PJgr6P3XDgXs5_B2d7ZVBR-BaAimYhVRIIShfpWk-4iN1XDk96TwloCkYx01BuN87o-VOvAnOG-K_EISA9RuEBpSkfUEuvHx8IU4VgfywdbhNMh6WXM0VP7ZzSpshgsS07MGjysGtZHNTVExEvFh14lyfjfqKjDoIJbo2msQwD2FvrTTb0iaQry1-Wwz9QJjVAUtid7tJuP8aBeNqvKyMIXRVnl5viFUr-Gs-Zl_WtyVvKlYWw0_rUn3ucmurK8tTy6iHyJ6XdUf4pYQebpEkIvi2rd__e_Z60V84MPvIYs6F_8CAy78aaYmUg5pihUEehIvGRj1RUZgdfaXElw90-m-M5hMOTI04LrzzVnBu7DcMYg4UC1W-WDrrj4zUq7y8_LczDA-yBC2-bkvWwLVtHLgV5yIEuIx2zAN74RQ4eCy1ffWDrVxYJBau4yiIyCc68dsatwHHH6bMK0uI9ib6Y9lsxCYjh-7MFcbP-4UBhgoDDXN9xoUToDLRqR9FTHqAHrGHp7BCdF5d6TQTVL5fmmg61MrLucOo-LZBXs1NY" +} +``` + +The `refresh_token` also expires at a set interval, though a longer one. Once it expires, it will no longer be accepted by HarperDB. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called. + +## Configuration + +Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration) with the following parameters: + +* `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation\_token expires (default 1d). +* `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh\_token expires (default 30d). + +A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
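+ +The two calls can also be chained together in a shell script. The following is a minimal sketch, not part of the official tooling, that assumes HarperDB is listening on `localhost:9925`, that the credentials below are valid, and that the `jq` utility is installed: + +```bash +# Request a token pair and extract the operation token. +OPERATION_TOKEN=$(curl --silent --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--data-raw '{"operation": "create_authentication_tokens", "username": "username", "password": "password"}' \ +| jq -r '.operation_token') + +# Use the operation token in place of Basic auth for subsequent operations. +curl --silent --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--header "Authorization: Bearer ${OPERATION_TOKEN}" \ +--data-raw '{"operation": "search_by_hash", "schema": "dev", "table": "dog", "hash_values": [1], "get_attributes": ["*"]}' +```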
diff --git a/site/versioned_docs/version-4.3/developers/security/mtls-auth.md b/site/versioned_docs/version-4.3/developers/security/mtls-auth.md new file mode 100644 index 00000000..8c063693 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/mtls-auth.md @@ -0,0 +1,7 @@ +--- +title: mTLS Authentication +--- + +# mTLS Authentication + +HarperDB supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration#http), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will be authenticated as the user whose id/username matches, by default, the `CN` (common name) in the client certificate's `subject`. The [HTTP config settings](../../deployments/configuration#http) also let you specify whether mTLS is required for all connections or optional.
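+ +With mTLS enabled, the client presents its certificate and key on each request. The following is a minimal cURL sketch, assuming a hypothetical `client.pem` certificate (signed by the configured certificate authority) and `client-key.pem` key, and an HTTPS listener on port 9925: + +```bash +# Authenticate with a client certificate instead of username/password credentials. +curl --location --request POST 'https://localhost:9925' \ +--cert client.pem \ +--key client-key.pem \ +--header 'Content-Type: application/json' \ +--data-raw '{"operation": "user_info"}' +```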
diff --git a/site/versioned_docs/version-4.3/developers/security/users-and-roles.md b/site/versioned_docs/version-4.3/developers/security/users-and-roles.md new file mode 100644 index 00000000..d490edf0 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/security/users-and-roles.md @@ -0,0 +1,267 @@ +--- +title: Users & Roles +--- + +# Users & Roles + +HarperDB utilizes a Role-Based Access Control (RBAC) framework to manage access to HarperDB instances. A user is assigned a role that determines the user’s permissions to access database resources and run core operations. + +## Roles in HarperDB + +Role permissions in HarperDB are broken into two categories – permissions around database manipulation and permissions around database definition. + +**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a HarperDB instance. + +1. At the table level, access permissions must be explicitly defined when adding or altering a role – _i.e. HarperDB will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations._ +1. At the attribute level, permissions for attributes in all tables included in the permissions set will be assigned based on either the specific attribute-level permissions defined in the table’s permission set or, if there are no attribute-level permissions defined, permissions will be based on the table’s CRUD set. + +**Database Definition**: Permissions related to managing databases, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role. + +**Built-In Roles** + +There are three built-in roles within HarperDB. See the full breakdown of operations restricted to super\_user roles [here](./users-and-roles#role-based-operation-restrictions). + +* `super_user` - This role provides full access to all operations and methods within a HarperDB instance; it can be considered the admin role. + * This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions. +* `cluster_user` - This role is an internal system role type that is managed internally to allow clustered instances to communicate with one another. + * This role facilitates communication between clustered instances and is not intended to be assigned to end users. +* `structure_user` - This role provides specific access to create and drop databases and tables. + * When defining this role type, you can either assign a value of true, which allows the role to create and drop databases & tables, or assign a string array of database names, which restricts the role to creating and dropping tables in the designated databases. + +**User-Defined Roles** + +In addition to built-in roles, admins (i.e. users assigned to the super\_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes. + +* Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON. +* Describe operations will return metadata for all databases, tables, and attributes that a user-defined role has CRUD permissions for. + +**Role Permissions** + +When creating a new, user-defined role in a HarperDB instance, you must provide a role name and the permissions to assign to that role. _Reminder, only super users can create and manage roles._ + +* `role` - name used to easily identify the role assigned to individual users. + + _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation._ + +* `permissions` - used to explicitly define CRUD access to existing table data. + +Example JSON for `add_role` request + +```json +{ + "operation":"add_role", + "role":"software_developer", + "permission":{ + "super_user":false, + "database_name":{ + "tables": { + "table_name1": { + "read":true, + "insert":true, + "update":true, + "delete":false, + "attribute_permissions":[ + { + "attribute_name":"attribute1", + "read":true, + "insert":true, + "update":true + } + ] + }, + "table_name2": { + "read":true, + "insert":true, + "update":true, + "delete":false, + "attribute_permissions":[] + } + } + } + } +} +```
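+ +A role definition like the one above is submitted through the operations API in the same way as any other operation. A minimal sketch, assuming the permission set is saved in a hypothetical local file `role.json` and that `admin:password` is a valid super\_user credential: + +```bash +# Submit the add_role operation using the JSON body stored in role.json. +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--user 'admin:password' \ +--data-raw "$(cat role.json)" +```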
+ +**Setting Role Permissions** + +There are two parts to a permissions set: + +* `super_user` – boolean value indicating if the role should be provided super\_user access. + + _If `super_user` is set to true, there should be no additional database-specific permissions values included, since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within HarperDB, but ignored, as super\_users have full access to the database._ +* `permissions`: Database tables that a role should have specific CRUD access to should be included in the final, database-specific `permissions` JSON. + + _For user-defined roles (i.e. non-super\_user roles), blank permissions will result in the user being restricted from accessing any of the database schema._ + +**Table Permissions JSON** + +Each table that a role should be given some level of CRUD permissions to must be included in the `tables` array for its database in the role's permissions JSON passed to the API (_see example above_). + +```json +{ + "table_name": { // the name of the table to define CRUD perms for + "read": boolean, // access to read from this table + "insert": boolean, // access to insert data to table + "update": boolean, // access to update data in table + "delete": boolean, // access to delete row data in table + "attribute_permissions": [ // permissions for specific table attributes + { + "attribute_name": "attribute_name", // attribute to assign permissions to + "read": boolean, // access to read this attribute from table + "insert": boolean, // access to insert this attribute into the table + "update": boolean // access to update this attribute in the table + } + ] + } +} +``` + +**Important Notes About Table Permissions** + +1. If a database and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to that database and/or those tables. +1. If a table-level CRUD permission is set to false, any attribute-level permission with that same CRUD permission set to true will return an error. + +**Important Notes About Attribute Permissions** + +1. If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array. +1. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON. + + _See table\_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute\_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._ +1. If attribute-level permissions are set – _i.e. attribute\_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2). + + _See table\_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert and update ‘attribute1’ and the table’s hash attribute but no other attributes on that table._ +1. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions. + + _See table\_name2’s permission set for an example of this._ +1. The `__createdtime__` and `__updatedtime__` attributes that HarperDB manages internally can have read permissions set, but, if set, all other attribute-level permissions on them will be ignored. +1. Please note that DELETE permissions are not included as a part of an individual attribute-level permission set. That is because it is not possible to delete individual attributes from a row; rows must be deleted in full. + * If a role needs the ability to delete rows from a table, that permission should be set on the table level. + * The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement. + +## Role-Based Operation Restrictions + +The table below includes all API operations available in HarperDB and indicates whether or not the operation is restricted to super\_user roles.
+ +_Keep in mind that non-super\_user roles will also be restricted within the operations they do have access to by the database-level CRUD permissions set for the roles._ + +| Databases and Tables | Restricted to Super\_Users | +|----------------------| :------------------------: | +| describe\_all | | +| describe\_database | | +| describe\_table | | +| create\_database | X | +| drop\_database | X | +| create\_table | X | +| drop\_table | X | +| create\_attribute | | +| drop\_attribute | X | + +| NoSQL Operations | Restricted to Super\_Users | +| ---------------------- | :------------------------: | +| insert | | +| update | | +| upsert | | +| delete | | +| search\_by\_hash | | +| search\_by\_value | | +| search\_by\_conditions | | + +| SQL Operations | Restricted to Super\_Users | +| -------------- | :------------------------: | +| select | | +| insert | | +| update | | +| delete | | + +| Bulk Operations | Restricted to Super\_Users | +| ---------------- | :------------------------: | +| csv\_data\_load | | +| csv\_file\_load | | +| csv\_url\_load | | +| import\_from\_s3 | | + +| Users and Roles | Restricted to Super\_Users | +| --------------- | :------------------------: | +| list\_roles | X | +| add\_role | X | +| alter\_role | X | +| drop\_role | X | +| list\_users | X | +| user\_info | | +| add\_user | X | +| alter\_user | X | +| drop\_user | X | + +| Clustering | Restricted to Super\_Users | +| ----------------------- | :------------------------: | +| cluster\_set\_routes | X | +| cluster\_get\_routes | X | +| cluster\_delete\_routes | X | +| add\_node | X | +| update\_node | X | +| cluster\_status | X | +| remove\_node | X | +| configure\_cluster | X | + +| Components | Restricted to Super\_Users | +| -------------------- | :------------------------: | +| get\_components | X | +| get\_component\_file | X | +| set\_component\_file | X | +| drop\_component | X | +| add\_component | X | +| package\_component | X | +| deploy\_component | X | + +| Custom Functions | Restricted to Super\_Users | +| ---------------------------------- | :------------------------: | +| custom\_functions\_status | X | +| get\_custom\_functions | X | +| get\_custom\_function | X | +| set\_custom\_function | X | +| drop\_custom\_function | X | +| add\_custom\_function\_project | X | +| drop\_custom\_function\_project | X | +| package\_custom\_function\_project | X | +| deploy\_custom\_function\_project | X | + +| Registration | Restricted to Super\_Users | +| ------------------ | :------------------------: | +| registration\_info | | +| get\_fingerprint | X | +| set\_license | X | + +| Jobs | Restricted to Super\_Users | +| ----------------------------- | :------------------------: | +| get\_job | | +| search\_jobs\_by\_start\_date | X | + +| Logs | Restricted to Super\_Users | +| --------------------------------- | :------------------------: | +| read\_log | X | +| read\_transaction\_log | X | +| delete\_transaction\_logs\_before | X | +| read\_audit\_log | X | +| delete\_audit\_logs\_before | X | + +| Utilities | Restricted to Super\_Users | +| ----------------------- | :------------------------: | +| delete\_records\_before | X | +| export\_local | X | +| export\_to\_s3 | X | +| system\_information | X | +| restart | X | +| restart\_service | X | +| get\_configuration | X | +| configure\_cluster | X | + +| Token Authentication | Restricted to Super\_Users | +| ------------------------------ | :------------------------: | +| create\_authentication\_tokens | | +| refresh\_operation\_token | | + +## Error: 
Must execute as User + +**You may have gotten an error like** `Error: Must execute as <>`. + +This means that you installed HarperDB as `<>`. Because HarperDB stores files natively on the operating system, we only allow the HarperDB executable to be run by a single user. This prevents permissions issues on files. + +For example, if you installed as user\_a but later wanted to run as user\_b, user\_b may not have access to the hdb files HarperDB needs. This also keeps HarperDB more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.
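+ +To run HarperDB when you are logged in as a different user, switch to the installing user first. A minimal sketch, assuming the install was performed by a hypothetical `user_a` and the `harperdb` CLI is on that user's PATH: + +```bash +# Start HarperDB as the user that installed it. +sudo su - user_a -c 'harperdb start' +```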
diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/date-functions.md b/site/versioned_docs/version-4.3/developers/sql-guide/date-functions.md new file mode 100644 index 00000000..2ae9addf --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/date-functions.md @@ -0,0 +1,226 @@ +--- +title: SQL Date Functions +--- + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL Date Functions + +HarperDB utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC. + +When parsing date values passed to SQL date functions in HDB, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found. + +### CURRENT_DATE() + +Returns the current date in UTC in `YYYY-MM-DD` String format. + +``` +"SELECT CURRENT_DATE() AS current_date_result" returns + { + "current_date_result": "2020-04-22" + } +``` + +### CURRENT_TIME() + +Returns the current time in UTC in `HH:mm:ss.SSS` String format. + +``` +"SELECT CURRENT_TIME() AS current_time_result" returns + { + "current_time_result": "15:18:14.639" + } +``` + +### CURRENT_TIMESTAMP + +Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. + +``` +"SELECT CURRENT_TIMESTAMP AS current_timestamp_result" returns + { + "current_timestamp_result": 1587568845765 + } +``` + +### DATE([date_string]) + +Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. + +If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above. + +``` +"SELECT DATE(1587568845765) AS date_result" returns + { + "date_result": "2020-04-22T15:20:45.765+0000" + } +``` + +``` +"SELECT DATE(CURRENT_TIMESTAMP) AS date_result2" returns + { + "date_result2": "2020-04-22T15:20:45.765+0000" + } +``` + +### DATE_ADD(date, value, interval) + +Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument. + +| Key | Shorthand | +|--------------|-----------| +| years | y | +| quarters | Q | +| months | M | +| weeks | w | +| days | d | +| hours | h | +| minutes | m | +| seconds | s | +| milliseconds | ms | + + +``` +"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND +"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return + { + "date_add_result": 1587655245765 + } +``` + +``` +"SELECT DATE_ADD(CURRENT_TIMESTAMP, 2, 'years') +AS date_add_result2" returns + { + "date_add_result2": 1650643129017 + } +``` + +### DATE_DIFF(date_1, date_2[, interval]) + +Returns the difference between the two date values passed based on the interval as a Number. If an interval is not provided, the function will return the difference value in milliseconds. + +Accepted interval values: +* years +* months +* weeks +* days +* hours +* minutes +* seconds + +``` +"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours') +AS date_diff_result" returns + { + "date_diff_result": -17519.753333333334 + } +``` + +### DATE_FORMAT(date, format) + +Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/). + +``` +"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss') +AS date_format_result" returns + { + "date_format_result": "2018-04-22 15:57:07" + } +``` + +### DATE_SUB(date, value, interval) + +Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are the same as for DATE_ADD; either string value (key or shorthand) can be passed as the interval argument. + +| Key | Shorthand | +|--------------|-----------| +| years | y | +| quarters | Q | +| months | M | +| weeks | w | +| days | d | +| hours | h | +| minutes | m | +| seconds | s | +| milliseconds | ms | + + +``` +"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns + { + "date_sub_result": 1524410445765 + } +``` + +### EXTRACT(date, date_part) + +Extracts and returns the date_part requested as a String value. Accepted date_part values below show the value returned for date = “2020-03-26T15:13:02.041+0000”. + +| date_part | Example return value | +|--------------|----------------------| +| year | “2020” | +| month | “3” | +| day | “26” | +| hour | “15” | +| minute | “13” | +| second | “2” | +| millisecond | “41” | + +``` +"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns + { + "extract_result": "2020" + } +``` + +### GETDATE() + +Returns the current Unix Timestamp in milliseconds. + +``` +"SELECT GETDATE() AS getdate_result" returns + { + "getdate_result": 1587568845765 + } +``` + +### GET_SERVER_TIME() +Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. + +``` +"SELECT GET_SERVER_TIME() AS get_server_time_result" returns + { + "get_server_time_result": "2020-04-22T15:20:45.765+0000" + } +``` + +### OFFSET_UTC(date, offset) +Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours.
+ +``` +"SELECT OFFSET_UTC(1587568845765, 240) AS offset_utc_result" returns + { + "offset_utc_result": "2020-04-22T19:20:45.765+0400" + } +``` + +``` +"SELECT OFFSET_UTC(1587568845765, 10) AS offset_utc_result2" returns + { + "offset_utc_result2": "2020-04-23T01:20:45.765+1000" + } +``` + +### NOW() +Returns the current Unix Timestamp in milliseconds. + +``` +"SELECT NOW() AS now_result" returns + { + "now_result": 1587568845765 + } +``` + diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/features-matrix.md b/site/versioned_docs/version-4.3/developers/sql-guide/features-matrix.md new file mode 100644 index 00000000..7856dbfd --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/features-matrix.md @@ -0,0 +1,87 @@ +--- +title: SQL Features Matrix +--- + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL Features Matrix + +HarperDB provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need. If not, feel free to [add a Feature Request](https:/feedback.harperdb.io/). + + +| INSERT | | +|------------------------------------|-----| +| Values - multiple values supported | ✔ | +| Sub-SELECT | ✗ | + +| UPDATE | | +|-----------------|-----| +| SET | ✔ | +| Sub-SELECT | ✗ | +| Conditions | ✔ | +| Date Functions* | ✔ | +| Math Functions | ✔ | + +| DELETE | | +|------------|-----| +| FROM | ✔ | +| Sub-SELECT | ✗ | +| Conditions | ✔ | + +| SELECT | | +|-----------------------|-----| +| Column SELECT | ✔ | +| Aliases | ✔ | +| Aggregator Functions | ✔ | +| Date Functions* | ✔ | +| Math Functions | ✔ | +| Constant Values | ✔ | +| Distinct | ✔ | +| Sub-SELECT | ✗ | + +| FROM | | +|-------------------|-----| +| Multi-table JOIN | ✔ | +| INNER JOIN | ✔ | +| LEFT OUTER JOIN | ✔ | +| LEFT INNER JOIN | ✔ | +| RIGHT OUTER JOIN | ✔ | +| RIGHT INNER JOIN | ✔ | +| FULL JOIN | ✔ | +| UNION | ✗ | +| Sub-SELECT | ✗ | +| TOP | ✔ | + +| WHERE | | +|----------------------------|-----| +| Multi-Conditions | ✔ | +| Wildcards | ✔ | +| IN | ✔ | +| LIKE | ✔ | +| Bit-wise Operators AND, OR | ✔ | +| Bit-wise Operators NOT | ✔ | +| NULL | ✔ | +| BETWEEN | ✔ | +| EXISTS,ANY,ALL | ✔ | +| Compare columns | ✔ | +| Compare constants | ✔ | +| Date Functions* | ✔ | +| Math Functions | ✔ | +| Sub-SELECT | ✗ | + +| GROUP BY | | +|-----------------------|-----| +| Multi-Column GROUP BY | ✔ | + +| HAVING | | +|--------------------------------|-----| +| Aggregate function conditions | ✔ | + +| ORDER BY | | +|-----------------------|-----| +| Multi-Column ORDER BY | ✔ | +| Aliases | ✔ | +| Date Functions* | ✔ | +| Math Functions | ✔ | \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/functions.md b/site/versioned_docs/version-4.3/developers/sql-guide/functions.md new file mode 100644 index 00000000..d7957037 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/functions.md @@ -0,0 +1,157 @@ +--- +title: HarperDB SQL Functions +--- + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. 
+::: + +# HarperDB SQL Functions + +This SQL keywords reference contains the SQL functions available in HarperDB. + +## Functions +### Aggregate + +| Keyword | Syntax | Description | +|-----------------|----------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------| +| AVG | AVG(_expression_) | Returns the average of a given numeric expression. | +| COUNT | SELECT COUNT(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. | +| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string of concatenated, comma-separated, non-null values from a group. Returns null when there are no non-null values. | +| MAX | SELECT MAX(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the largest value in a specified column. | +| MIN | SELECT MIN(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the smallest value in a specified column. | +| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. | +| ARRAY* | ARRAY(_expression_) | Returns a list of data as a field. | +| DISTINCT_ARRAY* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. | + +*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).
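+ +Any of these functions can be exercised through the `sql` operation of the operations API. A minimal sketch, assuming the `dev.dog` table from the SQL guide examples exists with an `age` attribute and that `admin:password` is a valid credential: + +```bash +# Run an aggregate query through the operations API. +curl --location --request POST 'http://localhost:9925' \ +--header 'Content-Type: application/json' \ +--user 'admin:password' \ +--data-raw '{"operation": "sql", "sql": "SELECT COUNT(id) AS dog_count, AVG(age) AS average_age FROM dev.dog"}' +```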
+ +### Conversion + +| Keyword | Syntax | Description | +|---------|--------------------------------------------------|------------------------------------------------------------------------| +| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. | +| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. | + + +### Date & Time + +| Keyword | Syntax | Description | +|-------------------|-----------------------------------------|-------------| +| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. | +| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” string format. | +| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, see [SQL Date Functions](./date-functions). | +| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ string format. If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above. For more information, see [SQL Date Functions](./date-functions). | +| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). | +| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed based on the interval as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, see [SQL Date Functions](./date-functions). | +| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, see [SQL Date Functions](./date-functions). | +| DATE_SUB | DATE_SUB(_date, value, interval_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). | +| DAY | DAY(_date_) | Returns the day of the month for the given date. | +| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. | +| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. For more information, see [SQL Date Functions](./date-functions). | +| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. | +| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. | +| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. | +| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. | +| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. | +| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. | +| MONTH | MONTH(_date_) | Returns month part for a specified date in range of 1 to 12. | +| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. | +| YEAR | YEAR(_date_) | Returns the year part for a specified date. | + +### Logical + +| Keyword | Syntax | Description | +|---------|--------------------------------------------------|--------------------------------------------------------------------------------------------| +| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. | +| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. | +| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. | +| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2, if not equal, returns expression_1. | + +### Mathematical + +| Keyword | Syntax | Description | +|---------|---------------------------------|-----------------------------------------------------------------------------------------------------| +| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. | +| CEIL | CEIL(_number_) | Returns integer ceiling, the smallest integer value that is bigger than or equal to a given number. | +| EXP | EXP(_number_) | Returns e to the power of a specified number. |
+| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. | +| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. | +| ROUND | ROUND(_number, decimal_places_) | Rounds a given number to a specified number of decimal places. | +| SQRT | SQRT(_expression_) | Returns the square root of an expression. | + + +### String + +| Keyword | Syntax | Description | +|-------------|---------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. | +| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. | +| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. | +| LEN | LEN(_string_) | Returns the length of a string. | +| LOWER | LOWER(_string_) | Converts a string to lower-case. | +| REGEXP | SELECT _column_name_ FROM _database.table_ WHERE _column_name_ REGEXP _pattern_ | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. | +| REGEXP_LIKE | SELECT _column_name_ FROM _database.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. | +| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string with new_string. | +| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified amount of characters from a string. | +| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. | +| UPPER | UPPER(_string_) | Converts a string to upper-case. | + +## Operators +### Logical Operators + +| Keyword | Syntax | Description | +|----------|--------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------| +| BETWEEN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | (inclusive) Returns values(numbers, text, or dates) within a given range. | +| IN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. | +| LIKE | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_n_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. | + +## Queries +### General + +| Keyword | Syntax | Description | +|-----------|--------------------------------------------------------|-----------------------------------------------------------------------------------| +| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _database.table_ | Returns only unique values, eliminating duplicate records. | +| FROM | FROM _database.table_ | Used to list the database(s), table(s), and any joins required for a SQL statement. |
| GROUP BY | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. | +| HAVING | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. | +| SELECT | SELECT _column_name(s)_ FROM _database.table_ | Selects data from table. | +| WHERE | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ | Extracts records based on a defined condition. | + +### Joins + +| Keyword | Syntax | Description | +|---------------------|----------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| CROSS JOIN | SELECT _column_name(s)_ FROM _database.table_1_ CROSS JOIN _database.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ | +| FULL OUTER | SELECT _column_name(s)_ FROM _database.table_1_ FULL OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). | +| [INNER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ INNER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Return only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. | +| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ LEFT OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Return all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. | +| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ RIGHT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Return all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. | + +### Predicates

| Keyword | Syntax | Description | +|--------------|------------------------------------------------------------------------------|----------------------------| +| IS NOT NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. | +| IS NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NULL | Tests for null values. | + +### Statements + +| Keyword | Syntax | Description | +|---------|------------------------------------------------------------------------------------------------|-------------------------------------| +| DELETE | DELETE FROM _database.table_ WHERE _condition_ | Deletes existing data from a table. | +| INSERT | INSERT INTO _database.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. | +| UPDATE | UPDATE _database.table_ SET _column_1 = value_1, column_2 = value_2, ....,_ WHERE _condition_ | Alters existing records in a table. |
diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/index.md b/site/versioned_docs/version-4.3/developers/sql-guide/index.md new file mode 100644 index 00000000..ae274bd3 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/index.md @@ -0,0 +1,88 @@ +--- +title: SQL Guide +--- + +# SQL Guide + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +## HarperDB SQL Guide + +The purpose of this guide is to describe the available functionality of HarperDB as it relates to supported SQL functionality. The SQL parser is still actively being developed; many SQL features may not be optimized or may not utilize indexes. This document will be updated as more features and functionality become available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying and for utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](./features-matrix)**.** + +HarperDB adheres to the concept of databases & tables. This allows developers to isolate table structures from each other all within one database. + +## Select + +HarperDB has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering. + +All results are returned as JSON object arrays. + +Query for all records and attributes in the dev.dog table: + +``` +SELECT * FROM dev.dog +``` + +Query specific columns from all rows in the dev.dog table: + +``` +SELECT id, dog_name, age FROM dev.dog +``` + +Query for all records and attributes in the dev.dog table, ordered by age in ascending order: + +``` +SELECT * FROM dev.dog ORDER BY age +``` + +_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._ + +## Insert + +HarperDB supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. HarperDB does not support selecting from one table to insert into another at this time. + +``` +INSERT INTO dev.dog (id, dog_name, age, breed_id) + VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347) +``` + +## Update + +HarperDB supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time selecting from one table to update another is not supported. + +``` +UPDATE dev.dog + SET owner_name = 'Kyle' + WHERE id IN (1, 2) +``` + +## Delete + +HarperDB supports deleting records from a table with condition support.
+ +``` +DELETE FROM dev.dog + WHERE age < 4 +``` + +## Joins + +HarperDB allows developers to join any number of tables and currently supports the following join types: + +* INNER JOIN +* LEFT INNER JOIN +* LEFT OUTER JOIN + +Here’s a basic example joining two tables from our Get Started example, joining a dogs table with a breeds table: + +``` +SELECT d.id, d.dog_name, d.owner_name, b.name, b.section + FROM dev.dog AS d + INNER JOIN dev.breed AS b ON d.breed_id = b.id + WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') + AND b.section = 'Mutt' + ORDER BY d.dog_name +``` diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/json-search.md b/site/versioned_docs/version-4.3/developers/sql-guide/json-search.md new file mode 100644 index 00000000..86010c5c --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/json-search.md @@ -0,0 +1,177 @@ +--- +title: SQL JSON Search +--- + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL JSON Search + +HarperDB automatically indexes all top level attributes in a row / object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, HarperDB offers a special SQL function called SEARCH\_JSON. The SEARCH\_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) into our SQL engine. + +## Syntax + +SEARCH\_JSON(_expression, attribute_) + +Executes the supplied string _expression_ against data of the defined top level _attribute_ for each row. The expression both filters and defines output from the JSON document. + +### Example 1 + +#### Search a string array + +Here are two records in the database: + +```json +[ + { + "id": 1, + "name": ["Harper", "Penny"] + }, + { + "id": 2, + "name": ["Penny"] + } +] +``` + +Here is a simple query that gets any record with "Harper" found in the name. + +``` +SELECT * +FROM dev.dog +WHERE search_json('"Harper" in *', name) +``` + +### Example 2 + +The purpose of this query is to give us every movie where at least two of our favorite actors from Marvel films have acted together. The results will return the movie title, the overview, release date and an object array of the actor’s name and their character name in the movie. + +Both function calls evaluate the credits.cast attribute, this attribute is an object array of every cast member in a movie. + +``` +SELECT m.title, + m.overview, + m.release_date, + SEARCH_JSON($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]].{"actor": name, "character": character}, c.`cast`) AS characters +FROM movies.credits c + INNER JOIN movies.movie m + ON c.movie_id = m.id +WHERE SEARCH_JSON($count($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L.
Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2 +``` + +A sample of this data from the movie The Avengers looks like + +```json +[ + { + "cast_id": 46, + "character": "Tony Stark / Iron Man", + "credit_id": "52fe4495c3a368484e02b251", + "gender": "male", + "id": 3223, + "name": "Robert Downey Jr.", + "order": 0 + }, + { + "cast_id": 2, + "character": "Steve Rogers / Captain America", + "credit_id": "52fe4495c3a368484e02b19b", + "gender": "male", + "id": 16828, + "name": "Chris Evans", + "order": 1 + }, + { + "cast_id": 307, + "character": "Bruce Banner / The Hulk", + "credit_id": "5e85e8083344c60015411cfa", + "gender": "male", + "id": 103, + "name": "Mark Ruffalo", + "order": 2 + } +] +``` + +Let’s break down the SEARCH\_JSON function call in the SELECT: + +``` +SEARCH_JSON( + $[name in [ + "Robert Downey Jr.", + "Chris Evans", + "Scarlett Johansson", + "Mark Ruffalo", + "Chris Hemsworth", + "Jeremy Renner", + "Clark Gregg", + "Samuel L. Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]].{ + "actor": name, + "character": character + }, + c.`cast` +) +``` + +The first argument passed to SEARCH\_JSON is the expression to execute against the second argument which is the cast attribute on the credits table. This expression will execute for every row. Looking into the expression it starts with “$\[…]” this tells the expression to iterate all elements of the cast array. + +Then the expression tells the function to only return entries where the name attribute matches any of the actors defined in the array: + +``` +name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"] +``` + +So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{“actor”: name, “character”: character}`. This tells the function to create a specific object for each matching entry. + +**Sample Result** + +```json +[ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } +] +``` + +Just having the SEARCH\_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors, in order to filter out the movies we do not want we also use SEARCH\_JSON in the WHERE clause. + +This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT: + +``` +SEARCH_JSON( + $count( + $[name in [ + "Robert Downey Jr.", + "Chris Evans", + "Scarlett Johansson", + "Mark Ruffalo", + "Chris Hemsworth", + "Jeremy Renner", + "Clark Gregg", + "Samuel L. Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]] + ), + c.`cast` +) >= 2 +``` + +As seen above we execute the same name filter against the cast array, the primary difference is we are wrapping the filtered results in $count(…). As it looks this returns a count of the results back which we then use against our SQL comparator of >= 2. + +To see further SEARCH\_JSON examples in action view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples). 
+ +To see further SEARCH\_JSON examples in action, view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples). + +To learn more about how to build expressions, check out the JSONata documentation: [http://docs.jsonata.org/overview](http://docs.jsonata.org/overview) diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/reserved-word.md b/site/versioned_docs/version-4.3/developers/sql-guide/reserved-word.md new file mode 100644 index 00000000..3794a7ae --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/reserved-word.md @@ -0,0 +1,207 @@ +--- +title: HarperDB SQL Reserved Words +--- + +:::warning +HarperDB encourages developers to utilize other querying tools over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# HarperDB SQL Reserved Words + +This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a database, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable. + +For example, for a table called `ASSERT` in the `data` database, a SQL select on that table would look like: + +``` +SELECT * from data.`ASSERT` +``` + +Alternatively: + +``` +SELECT * from data.[ASSERT] +``` + +### RESERVED WORD LIST + +* ABSOLUTE +* ACTION +* ADD +* AGGR +* ALL +* ALTER +* AND +* ANTI +* ANY +* APPLY +* ARRAY +* AS +* ASSERT +* ASC +* ATTACH +* AUTOINCREMENT +* AUTO_INCREMENT +* AVG +* BEGIN +* BETWEEN +* BREAK +* BY +* CALL +* CASE +* CAST +* CHECK +* CLASS +* CLOSE +* COLLATE +* COLUMN +* COLUMNS +* COMMIT +* CONSTRAINT +* CONTENT +* CONTINUE +* CONVERT +* CORRESPONDING +* COUNT +* CREATE +* CROSS +* CUBE +* CURRENT_TIMESTAMP +* CURSOR +* DATABASE +* DECLARE +* DEFAULT +* DELETE +* DELETED +* DESC +* DETACH +* DISTINCT +* DOUBLEPRECISION +* DROP +* ECHO +* EDGE +* END +* ENUM +* ELSE +* EXCEPT +* EXISTS +* EXPLAIN +* FALSE +* FETCH +* FIRST +* FOREIGN +* FROM +* GO +* GRAPH +* GROUP +* GROUPING +* HAVING +* HDB_HASH +* HELP +* IF +* IDENTITY +* IS +* IN +* INDEX +* INNER +* INSERT +* INSERTED +* INTERSECT +* INTO +* JOIN +* KEY +* LAST +* LET +* LEFT +* LIKE +* LIMIT +* LOOP +* MATCHED +* MATRIX +* MAX +* MERGE +* MIN +* MINUS +* MODIFY +* NATURAL +* NEXT +* NEW +* NOCASE +* NO +* NOT +* NULL +* OFF +* ON +* ONLY +* OFFSET +* OPEN +* OPTION +* OR +* ORDER +* OUTER +* OVER +* PATH +* PARTITION +* PERCENT +* PLAN +* PRIMARY +* PRINT +* PRIOR +* QUERY +* READ +* RECORDSET +* REDUCE +* REFERENCES +* RELATIVE +* REPLACE +* REMOVE +* RENAME +* REQUIRE +* RESTORE +* RETURN +* RETURNS +* RIGHT +* ROLLBACK +* ROLLUP +* ROW +* SCHEMA +* SCHEMAS +* SEARCH +* SELECT +* SEMI +* SET +* SETS +* SHOW +* SOME +* SOURCE +* STRATEGY +* STORE +* SYSTEM +* SUM +* TABLE +* TABLES +* TARGET +* TEMP +* TEMPORARY +* TEXTSTRING +* THEN +* TIMEOUT +* TO +* TOP +* TRAN +* TRANSACTION +* TRIGGER +* TRUE +* TRUNCATE +* UNION +* UNIQUE +* UPDATE +* USE +* USING +* VALUE +* VERTEX +* VIEW +* WHEN +* WHERE +* WHILE +* WITH +* WORK diff --git a/site/versioned_docs/version-4.3/developers/sql-guide/sql-geospatial-functions.md b/site/versioned_docs/version-4.3/developers/sql-guide/sql-geospatial-functions.md new file mode 100644 index 00000000..df398174 --- /dev/null +++ b/site/versioned_docs/version-4.3/developers/sql-guide/sql-geospatial-functions.md @@ -0,0 +1,384 @@ +--- +title: SQL Geospatial Functions +--- + +:::warning +HarperDB encourages developers to utilize other querying tools
over SQL for performance purposes. HarperDB SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL Geospatial Functions + +HarperDB geospatial features require data to be stored in a single column using the [GeoJSON standard](http://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements. + + + +If you are new to GeoJSON you should check out the full specification here: http://geojson.org/. There are a few important things to point out before getting started. + + + +1) All GeoJSON coordinates are stored in `[longitude, latitude]` format. +2) Coordinates or GeoJSON geometries must be passed as strings when written directly in a SQL statement. +3) Note if you are using Postman for your testing. Due to limitations in the Postman client, you will need to escape quotes in your strings and your SQL will need to be passed on a single line. + + +In the examples contained in the left-hand navigation, database and table names may change, but all GeoJSON data will be stored in a column named geo_data. + +# geoArea + +The geoArea() function returns the area of one or more features in square meters. + +## Syntax +geoArea(_geoJSON_) + +## Parameters +| Parameter | Description | +|-----------|---------------------------------| +| geoJSON | Required. One or more features. | + +### Example 1 +Calculate the area, in square meters, of a manually passed GeoJSON polygon. + +``` +SELECT geoArea('{ + "type":"Feature", + "geometry":{ + "type":"Polygon", + "coordinates":[[ + [0,0], + [0.123456,0], + [0.123456,0.123456], + [0,0.123456] + ]] + } +}') +``` + +### Example 2 +Find all records that have an area less than 1 square mile (or 2589988 square meters). + +``` +SELECT * FROM dev.locations +WHERE geoArea(geo_data) < 2589988 +``` + +# geoLength +Takes a GeoJSON and measures its length in the specified units (default is kilometers). + +## Syntax +geoLength(_geoJSON_[_, units_]) + +## Parameters +| Parameter | Description | +|------------|-----------------------------------------------------------------------------------------------------------------------| +| geoJSON | Required. GeoJSON to measure. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | + +### Example 1 +Calculate the length, in kilometers, of a manually passed GeoJSON linestring. + +``` +SELECT geoLength('{ + "type": "Feature", + "geometry": { + "type": "LineString", + "coordinates": [ + [-104.97963309288025,39.76163265441438], + [-104.9823260307312,39.76365323407955], + [-104.99193906784058,39.75616442110704] + ] + } +}') +``` + +### Example 2 +Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of lengths smallest to largest. + +``` +SELECT *, geoLength(geo_data, 'miles') as length +FROM dev.locations +WHERE geoLength(geo_data, 'miles') < 5 +ORDER BY length ASC +``` + +# geoDifference +Returns a new polygon with the difference of the second polygon clipped from the first polygon. + +## Syntax +geoDifference(_polygon1, polygon2_) + +## Parameters +| Parameter | Description | +|------------|----------------------------------------------------------------------------| +| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. | +| polygon2 | Required.
+| polygon2  | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1.     |
+
+### Example
+Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_).
+
+```
+SELECT geoDifference('{
+		"type": "Feature",
+		"properties": {
+			"name":"Colorado"
+		},
+		"geometry": {
+			"type": "Polygon",
+			"coordinates": [[
+				[-109.072265625,37.00255267215955],
+				[-102.01904296874999,37.00255267215955],
+				[-102.01904296874999,41.0130657870063],
+				[-109.072265625,41.0130657870063],
+				[-109.072265625,37.00255267215955]
+			]]
+		}
+	}',
+	'{
+		"type": "Feature",
+		"properties": {
+			"name":"City Park"
+		},
+		"geometry": {
+			"type": "Polygon",
+			"coordinates": [[
+				[-104.95973110198975,39.7543828214657],
+				[-104.95955944061278,39.744781185675386],
+				[-104.95904445648193,39.74422022399989],
+				[-104.95835781097412,39.74402223643582],
+				[-104.94097709655762,39.74392324244047],
+				[-104.9408483505249,39.75434982844515],
+				[-104.95973110198975,39.7543828214657]
+			]]
+		}
+	}'
+)
+```
+
+# geoDistance
+Calculates the distance between two points in units (default is kilometers).
+
+## Syntax
+geoDistance(_point1, point2_[_, units_])
+
+## Parameters
+| Parameter | Description                                                                                                            |
+|-----------|------------------------------------------------------------------------------------------------------------------------|
+| point1    | Required. GeoJSON Point specifying the origin.                                                                           |
+| point2    | Required. GeoJSON Point specifying the destination.                                                                      |
+| units     | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’.    |
+
+### Example 1
+Calculate the distance, in miles, between HarperDB’s headquarters and the Washington Monument.
+
+```
+SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles')
+```
+
+### Example 2
+Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in ascending order.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoDistance('[-104.979127,39.761563]', geo_data, 'kilometers') < 40
+ORDER BY distance ASC
+```
+
+# geoNear
+Determines if point1 and point2 are within a specified distance from each other; default units are kilometers. Returns a Boolean.
+
+## Syntax
+geoNear(_point1, point2, distance_[_, units_])
+
+## Parameters
+| Parameter | Description                                                                                                            |
+|-----------|------------------------------------------------------------------------------------------------------------------------|
+| point1    | Required. GeoJSON Point specifying the origin.                                                                           |
+| point2    | Required. GeoJSON Point specifying the destination.                                                                      |
+| distance  | Required. The maximum distance in units as an integer or decimal.                                                        |
+| units     | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’.    |
+
+### Example 1
+Return all locations within 50 miles of a given point.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles')
+```
+
+### Example 2
+Return all locations within 2 degrees of a given point (each degree of latitude/longitude is about 69 miles [111 kilometers]). Return all data and the distance in miles, sorted by ascending distance.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 2, 'degrees')
+ORDER BY distance ASC
+```
+
+# geoContains
+Determines if geo2 is completely contained by geo1. Returns a Boolean.
+
+## Syntax
+geoContains(_geo1, geo2_)
+
+## Parameters
+| Parameter | Description                                                                         |
+|-----------|-------------------------------------------------------------------------------------|
+| geo1      | Required. Polygon or MultiPolygon GeoJSON feature.                                    |
+| geo2      | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1.     |
+
+### Example 1
+Return all locations within the state of Colorado (passed as a GeoJSON string).
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains('{
+	"type": "Feature",
+	"properties": {
+		"name":"Colorado"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-109.072265625,37.00255267],
+			[-102.01904296874999,37.00255267],
+			[-102.01904296874999,41.01306579],
+			[-109.072265625,41.01306579],
+			[-109.072265625,37.00255267]
+		]]
+	}
+}', geo_data)
+```
+
+### Example 2
+Return all locations which contain HarperDB Headquarters.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains(geo_data, '{
+	"type": "Feature",
+	"properties": {
+		"name": "HarperDB Headquarters"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.98060941696167,39.760704817357905],
+			[-104.98053967952728,39.76065120861263],
+			[-104.98055577278137,39.760642961109674],
+			[-104.98037070035934,39.76049450588716],
+			[-104.9802714586258,39.76056254790385],
+			[-104.9805235862732,39.76076461167841],
+			[-104.98060941696167,39.760704817357905]
+		]]
+	}
+}')
+```
+
+# geoEqual
+Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean.
+
+## Syntax
+geoEqual(_geo1_, _geo2_)
+
+## Parameters
+| Parameter | Description                            |
+|-----------|----------------------------------------|
+| geo1      | Required. GeoJSON geometry or feature. |
+| geo2      | Required. GeoJSON geometry or feature. |
+
+### Example
+Find HarperDB Headquarters among all locations in the database.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoEqual(geo_data, '{
+	"type": "Feature",
+	"properties": {
+		"name": "HarperDB Headquarters"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.98060941696167,39.760704817357905],
+			[-104.98053967952728,39.76065120861263],
+			[-104.98055577278137,39.760642961109674],
+			[-104.98037070035934,39.76049450588716],
+			[-104.9802714586258,39.76056254790385],
+			[-104.9805235862732,39.76076461167841],
+			[-104.98060941696167,39.760704817357905]
+		]]
+	}
+}')
+```
+
+# geoCrosses
+Determines if the geometries cross over each other. Returns a Boolean.
+
+## Syntax
+geoCrosses(_geo1, geo2_)
+
+## Parameters
+| Parameter | Description                            |
+|-----------|----------------------------------------|
+| geo1      | Required. GeoJSON geometry or feature. |
+| geo2      | Required. GeoJSON geometry or feature. |
+
+### Example
+Find all locations that cross over a highway.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoCrosses(
+	geo_data,
+	'{
+		"type": "Feature",
+		"properties": {
+			"name": "Highway I-25"
+		},
+		"geometry": {
+			"type": "LineString",
+			"coordinates": [
+				[-104.9139404296875,41.00477542222947],
+				[-105.0238037109375,39.715638134796336],
+				[-104.853515625,39.53370327008705],
+				[-104.853515625,38.81403111409755],
+				[-104.61181640625,38.39764411353178],
+				[-104.8974609375,37.68382032669382],
+				[-104.501953125,37.00255267215955]
+			]
+		}
+	}'
+)
+```
+
+# geoConvert
+
+Converts a series of coordinates into a GeoJSON of the specified type.
+
+## Syntax
+geoConvert(_coordinates, geo_type_[, _properties_])
+
+## Parameters
+| Parameter   | Description                                                                                                                          |
+|-------------|----------------------------------------------------------------------------------------------------------------------------------------|
+| coordinates | Required. One or more coordinates.                                                                                                       |
+| geo_type    | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’.      |
+| properties  | Optional. Escaped JSON array with properties to be added to the GeoJSON output.                                                          |
+
+### Example
+Convert a given coordinate into a GeoJSON point with specified properties.
+
+```
+SELECT geoConvert(
+	'[-104.979127,39.761563]',
+	'point',
+	'{
+		"name": "HarperDB Headquarters"
+	}'
+)
+```
diff --git a/site/versioned_docs/version-4.3/getting-started.md b/site/versioned_docs/version-4.3/getting-started.md
new file mode 100644
index 00000000..fa4edb5d
--- /dev/null
+++ b/site/versioned_docs/version-4.3/getting-started.md
@@ -0,0 +1,84 @@
+---
+title: Getting Started
+---
+
+# Getting Started
+
+HarperDB is designed for quick and simple setup and deployment, with smart defaults that lead to fast, scalable, and globally distributed database applications.
+
+You can easily create a HarperDB database in the cloud through our studio or install it locally. The quickest way to get HarperDB up and running is with [HarperDB Cloud](./deployments/harperdb-cloud/), our database-as-a-service offering. However, HarperDB is a [database application platform](./developers/applications/), and to leverage HarperDB’s full application development capabilities for defining schemas, endpoints, messaging, and gateways, you may wish to install and run HarperDB locally so that you can use your standard local IDE tools, debugging, and version control.
+
+### Installing a HarperDB Instance
+
+You can simply install HarperDB with npm (or yarn, or other package managers):
+
+```shell
+npm install -g harperdb
+```
+
+Here we installed HarperDB globally (and we recommend this) to make it easy to run a single HarperDB instance with multiple projects, but you can install it locally (not globally) as well.
+
+You can then run HarperDB with:
+
+```shell
+harperdb
+```
+
+You can now use HarperDB as a standalone database. You can also create a cloud instance (see below), which is also an easy way to get started.
+
+#### Developing Database Applications with HarperDB
+
+HarperDB is more than just a database; with HarperDB you build "database applications" which package your schema, endpoints, and application logic together. You can then deploy your application to an entire cluster of HarperDB instances, ready to scale to on-the-edge delivery of data and application endpoints directly to your users. To get started with HarperDB, take a look at our application development guide, with quick and easy examples:
+
+[Database application development guide](./developers/applications/)
+
+### Setting up a Cloud Instance
+
+To set up a HarperDB cloud instance, simply sign up and create a new instance:
+
+1. [Sign up for the HarperDB Studio](https://studio.harperdb.io/sign-up)
+1. [Create a new HarperDB Cloud instance](./administration/harperdb-studio/instances#create-a-new-instance)
+
+Note that a local instance and a cloud instance are not mutually exclusive. You can register your local instance in the HarperDB Studio, and a common development flow is to develop locally and then deploy your application to your cloud instance.
+
+HarperDB Cloud instance provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready.
+
+#### Using the HarperDB Studio
+
+Now that you have a HarperDB instance, if you want to use HarperDB as a standalone database, you can fully administer and interact with your database through the Studio. This section links to appropriate articles to get you started interacting with your data.
+
+1. [Create a database](./administration/harperdb-studio/manage-databases-browse-data#create-a-database)
+1. [Create a table](./administration/harperdb-studio/manage-databases-browse-data#create-a-table)
+1. [Add a record](./administration/harperdb-studio/manage-databases-browse-data#add-a-record)
+1. [Load CSV data](./administration/harperdb-studio/manage-databases-browse-data#load-csv-data) (Here’s a sample CSV of the HarperDB team’s dogs)
+1. [Query data via SQL](./administration/harperdb-studio/query-instance-data)
+
+## Administering HarperDB
+
+If you are deploying and administering HarperDB, you may want to look at our [configuration documentation](./deployments/configuration) and our administrative operations API below.
+
+### HarperDB APIs
+
+The preferred way to interact with HarperDB for typical querying, accessing, and updating data (CRUD) operations is through the REST interface, described in the [REST documentation](./developers/rest).
+
+The Operations API provides extensive administrative capabilities for HarperDB, and the [Operations API documentation has usage and examples](./developers/operations-api/). Generally it is recommended that you use the RESTful interface as your primary interface for performant data access, querying, and manipulation (DML) when building production applications (under heavy load), and the operations API (and SQL) for data definition (DDL) and administrative purposes.
+
+The HarperDB Operations API exposes a single endpoint, which means the only thing that needs to change across different calls is the body. For example purposes, a basic cURL command is shown below to create a database called dev. To change this behavior, swap out the operation in the `data-raw` body parameter.
+
+```
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+	"operation": "create_database",
+	"database": "dev"
+}'
+```
+
+## Support and Learning More
+
+If you find yourself in need of additional support you can submit a [HarperDB support ticket](https://harperdbhelp.zendesk.com/hc/en-us/requests/new). You can also learn more about available HarperDB projects by searching [GitHub](https://github.com/search?q=harperdb).
+
+### Video Tutorials
+
+[HarperDB video tutorials are available on our YouTube channel](https://www.youtube.com/@harperdbio). HarperDB and the HarperDB Studio are constantly changing; as such, there may be small discrepancies in UI/UX.
diff --git a/site/versioned_docs/version-4.3/index.md b/site/versioned_docs/version-4.3/index.md
new file mode 100644
index 00000000..780b75aa
--- /dev/null
+++ b/site/versioned_docs/version-4.3/index.md
@@ -0,0 +1,106 @@
+---
+title: HarperDB Docs
+---
+
+# HarperDB Docs
+
+HarperDB is a globally-distributed edge application platform. It reduces complexity, increases performance, and lowers costs by combining user-defined applications, a high-performance database, and an enterprise-grade streaming broker into a single package.
The platform offers unlimited horizontal scale at the click of a button, and syncs data across the cluster in milliseconds. HarperDB simplifies the process of delivering applications and the data that drives them to the edge, which dramatically improves both the user experience and total cost of ownership for large-scale applications. Deploying HarperDB on global infrastructure enables a CDN-like solution for enterprise data and applications.
+
+HarperDB's documentation covers installation, getting started, administrative operations APIs, security, and much more. Browse the topics at left, or choose one of the commonly used documentation sections below.
+
+:::info
+Wondering what's new with HarperDB 4.3? Take a look at our latest [Release Notes](./technical-details/release-notes/v4-tucker/4.3.0).
+:::
+
+## Getting Started
+
+* **Getting Started Guide**: Get up and running with HarperDB
+* **Quick Install HarperDB**: Run HarperDB on your own hardware
+* **Try HarperDB Cloud**: Spin up an instance in minutes to get going fast
+
+## Building with HarperDB
+
+* **HarperDB Applications**: Build a fully featured HarperDB Component with custom functionality
+* **REST Queries**: The recommended HTTP interface for data access, querying, and manipulation
+* **Operations API**: Configure, deploy, administer, and control your HarperDB instance
+* **Clustering & Replication**: Connect multiple HarperDB databases together to create a database mesh network that enables users to define data replication patterns
+* **Explore the HarperDB Studio**: The web-based GUI for HarperDB. Studio enables you to administer, navigate, and monitor all of your HarperDB instances in a simple, user-friendly interface
diff --git a/site/versioned_docs/version-4.3/technical-details/_category_.json b/site/versioned_docs/version-4.3/technical-details/_category_.json
new file mode 100644
index 00000000..69ce80a6
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/_category_.json
@@ -0,0 +1,12 @@
+{
+	"label": "Technical Details",
+	"position": 4,
+	"link": {
+		"type": "generated-index",
+		"title": "Technical Details Documentation",
+		"description": "Reference documentation and technical specifications",
+		"keywords": [
+			"technical-details"
+		]
+	}
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/analytics.md b/site/versioned_docs/version-4.3/technical-details/reference/analytics.md
new file mode 100644
index 00000000..7b475176
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/analytics.md
@@ -0,0 +1,117 @@
+---
+title: Analytics
+---
+
+# Analytics
+
+HarperDB provides extensive telemetry and analytics data to help monitor the status of the server and workloads, to help understand traffic and usage patterns, to identify issues and scaling needs, and to identify the queries and actions that are consuming the most resources.
+
+HarperDB collects statistics for all operations, URL endpoints, and messaging topics, aggregating information by thread, operation, resource, and method, in real time. These statistics are logged in the `hdb_raw_analytics` and `hdb_analytics` tables in the `system` database.
+
+There are two "levels" of analytics in the HarperDB analytics tables: the first is the immediate level of raw, direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search_by_conditions operation (this will search for 10 seconds' worth of analytics) on the `hdb_raw_analytics` table:
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+	"operation": "search_by_conditions",
+	"schema": "system",
+	"table": "hdb_raw_analytics",
+	"conditions": [{
+		"search_attribute": "id",
+		"search_type": "between",
+		"search_value": [1688594000000, 1688594010000]
+	}]
+}
+```
+
+And a typical response looks like:
+
+```
+{
+	"time": 1688594390708,
+	"period": 1000.8336279988289,
+	"metrics": [
+		{
+			"metric": "bytes-sent",
+			"path": "search_by_conditions",
+			"type": "operation",
+			"median": 202,
+			"mean": 202,
+			"p95": 202,
+			"p90": 202,
+			"count": 1
+		},
+		...
+		{
+			"metric": "memory",
+			"threadId": 2,
+			"rss": 1492664320,
+			"heapTotal": 124596224,
+			"heapUsed": 119563120,
+			"external": 3469790,
+			"arrayBuffers": 798721
+		},
+		{
+			"metric": "utilization",
+			"idle": 138227.52767700003,
+			"active": 70.5066209952347,
+			"utilization": 0.0005098165086230495
+		}
+	],
+	"threadId": 2,
+	"totalBytesProcessed": 12182820,
+	"id": 1688594390708.6853
+}
+```
+
+The second level of analytics recording is aggregate data. The aggregate records are recorded once a minute, and aggregate the results from all the per-second entries from all the threads, creating a summary of statistics once a minute. The primary keys for these records are also timestamps in milliseconds since epoch, and the records can be queried from the `hdb_analytics` table.
You can query these with an operation like:
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+	"operation": "search_by_conditions",
+	"schema": "system",
+	"table": "hdb_analytics",
+	"conditions": [{
+		"search_attribute": "id",
+		"search_type": "between",
+		"search_value": [1688194100000, 1688594990000]
+	}]
+}
+```
+
+And a summary record looks like:
+
+```
+{
+	"period": 60000,
+	"metric": "bytes-sent",
+	"method": "connack",
+	"type": "mqtt",
+	"median": 4,
+	"mean": 4,
+	"p95": 4,
+	"p90": 4,
+	"count": 1,
+	"id": 1688589569646,
+	"time": 1688589569646
+}
+```
+
+The following general resource usage statistics are tracked:
+
+* memory - This includes RSS, heap, buffer, and external data usage.
+* utilization - How much of the time the worker was processing requests.
+* mqtt-connections - The number of MQTT connections.
+
+The following types of information are tracked for each HTTP request:
+
+* success - How many requests returned a successful response (2xx response code).
+* TTFB - Time to first byte in the response to the client.
+* transfer - Time to finish the transfer of the data to the client.
+* bytes-sent - How many bytes of data were sent to the client.
+
+Requests are categorized by operation name for the operations API, by resource name for the REST API, and by command for the MQTT interface.
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/architecture.md b/site/versioned_docs/version-4.3/technical-details/reference/architecture.md
new file mode 100644
index 00000000..f2881d3c
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/architecture.md
@@ -0,0 +1,42 @@
+---
+title: Architecture
+---
+
+# Architecture
+
+HarperDB's architecture consists of resources, which include tables and user-defined data sources and extensions, and server interfaces, which include the RESTful HTTP interface, the operations API, and MQTT. Servers are supported by routing and auth services.
+
+```
+     ┌──────────┐  ┌──────────┐
+     │ Clients  │  │ Clients  │
+     └────┬─────┘  └────┬─────┘
+          │             │
+          ▼             ▼
+┌──────────────────────────────────────────┐
+│        Socket routing/management         │
+├────────────────────────┬─────────────────┤
+│  Server Interfaces   ─►│ Authentication  │
+│  RESTful HTTP, MQTT  ◄─│ Authorization   │
+├────────────────────────┴─────────────────┤
+│                Resources                 │
+│                                          │
+│  ┌─────────────────┐    ┌─────────────┐  │
+│  │ Database tables │◄──►│     App     │  │
+│  └────────┬────────┘    │  resources  │  │
+│           ▲             └──────┬──────┘  │
+│           │                    │         │
+│  ┌────────┴───────┐            │         │
+│  │    External    │◄───────────┘         │
+│  │  data sources  │                      │
+│  └────────────────┘                      │
+└──────────────────────────────────────────┘
+```
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/content-types.md b/site/versioned_docs/version-4.3/technical-details/reference/content-types.md
new file mode 100644
index 00000000..d2bc096a
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/content-types.md
@@ -0,0 +1,29 @@
+---
+title: Content Types
+---
+
+# Content Types
+
+HarperDB supports several different content types (or MIME types) for both HTTP request bodies (describing operations) as well as for serializing content into HTTP response bodies. HarperDB follows HTTP standards for specifying both request body content types and acceptable response body content types.
Any of these content types can be used with any of the standard HarperDB operations.
+
+For request body content, the content type should be specified with the `Content-Type` header. For example, with JSON use `Content-Type: application/json`, and for CBOR include `Content-Type: application/cbor`. To request that the response body be encoded with a specific content type, use the `Accept` header. If you want the response to be in JSON, use `Accept: application/json`. If you want the response to be in CBOR, use `Accept: application/cbor`.
+
+The following content types are supported:
+
+## JSON - application/json
+
+JSON is the most widely used content type, and is relatively readable and easy to work with. However, JSON does not support all the data types that are supported by HarperDB, and can't be used to natively encode data types like binary data or explicit Maps/Sets. Also, JSON is not as efficient as binary formats. When using JSON, compression is recommended (this also follows standard HTTP protocol with the `Accept-Encoding` header) to improve network transfer performance (although there is server performance overhead). JSON is a good choice for web development, when standard JSON types are sufficient, and, when combined with compression, when debuggability/observability is important.
+
+## CBOR - application/cbor
+
+CBOR is a highly efficient binary format, and is a recommended format for most production use cases with HarperDB. CBOR supports the full range of HarperDB data types, including binary data, typed dates, and explicit Maps/Sets. CBOR is very performant and space efficient even without compression. Compression will still yield better network transfer size/performance, but compressed CBOR is generally not any smaller than compressed JSON. CBOR also natively supports streaming for optimal performance (using indefinite length arrays). The CBOR format has excellent standardization, and HarperDB's CBOR support provides an excellent balance of performance and size efficiency.
+
+## MessagePack - application/x-msgpack
+
+MessagePack is another efficient binary format like CBOR, with support for all HarperDB data types. MessagePack generally has wider adoption than CBOR and can be useful in systems that don't have CBOR support (or good support). However, MessagePack does not have native support for streaming of arrays of data (for query results), and so query results are returned as a (concatenated) sequence of MessagePack objects/maps. MessagePack decoders used with HarperDB's MessagePack must be prepared to decode a direct sequence of MessagePack values to properly read responses.
+
+## Comma-separated Values (CSV) - text/csv
+
+Comma-separated values is an easy-to-use and easy-to-understand format that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available.
+
+In addition, with the REST interface, you can use file-style extensions to indicate an encoding, like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest) for more information on how to do this.
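+
+As a concrete illustration of this negotiation, the following minimal sketch requests a CBOR-encoded record over the REST interface and decodes it client-side. The table name, record id, and port are hypothetical, and decoding is shown with the third-party `cbor-x` package rather than anything bundled with HarperDB:
+
+```javascript
+// Minimal sketch, assuming a table exported at /Dog/ and the cbor-x package.
+import { decode } from 'cbor-x';
+
+async function getDogAsCbor(id) {
+	const response = await fetch(`http://localhost:9926/Dog/${id}`, {
+		headers: { Accept: 'application/cbor' }, // ask for a CBOR response body
+	});
+	// decode the binary response body back into a JavaScript object
+	return decode(new Uint8Array(await response.arrayBuffer()));
+}
+```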
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/data-types.md b/site/versioned_docs/version-4.3/technical-details/reference/data-types.md
new file mode 100644
index 00000000..c8acebe4
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/data-types.md
@@ -0,0 +1,52 @@
+---
+title: Data Types
+---
+
+# Data Types
+
+HarperDB supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (HarperDB supports all JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. HarperDB supports MessagePack and CBOR, which allow for all of HarperDB's supported data types. [Schema definitions can specify the expected types for fields, with GraphQL Schema Types](../../developers/applications/defining-schemas), which are used for validation of incoming typed data (JSON, MessagePack) and for auto-conversion of untyped data (CSV, [query parameters](../../developers/rest)). Available data types include:
+
+(Note that these labels are descriptive; they do not necessarily correspond to the GraphQL schema type names, but the schema type names are noted where possible.)
+
+## Boolean
+
+true or false. The GraphQL schema type name is `Boolean`.
+
+## String
+
+Strings, or text, are a sequence of any Unicode characters and are internally encoded with UTF-8. The GraphQL schema type name is `String`.
+
+## Number
+
+Numbers can be stored as signed integers up to 1000 bits of precision (about 300 digits) or as floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. With JSON, numbers are automatically parsed and stored in the most appropriate format. Custom components and applications may use BigInt numbers to store/access integers that are larger than 53 bits. The following GraphQL schema type names are supported:
+
+* `Float` - Any number that can be represented with a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) ("double")
+* `Int` - Any integer from -2147483648 to 2147483647
+* `Long` - Any integer from -9007199254740992 to 9007199254740992
+* `BigInt` - Any integer (negative or positive) with less than 300 digits
+
+Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately.
+
+## Object/Map
+
+Objects, or maps, that hold a set of named properties can be stored in HarperDB. When provided as JSON objects or JavaScript objects, all property keys are stored as strings. The order of properties is also preserved in HarperDB’s storage. Duplicate property keys are not allowed (they are dropped in parsing any incoming data).
+
+## Array
+
+Arrays hold an ordered sequence of values and can be stored in HarperDB. There is no support for sparse arrays, although you can use objects to store data with numbers (converted to strings) as properties.
+
+## Null
+
+A null value can be stored in HarperDB property values as well.
+
+## Date
+
+Dates can be stored as a specific data type. This is not supported in JSON, but is supported by MessagePack and CBOR. Custom Functions can also store and use Dates using JavaScript Date instances.
The GraphQL schema type name is `Date`.
+
+## Binary Data
+
+Binary data can be stored in property values as well. JSON doesn’t have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in HarperDB. Custom Functions can also store binary data by using NodeJS’s Buffer or Uint8Array instances to hold the binary data. The GraphQL schema type name is `Bytes`.
+
+## Explicit Map/Set
+
+Explicit instances of JavaScript Maps and Sets can be stored and preserved in HarperDB as well. This can’t be represented with JSON, but can be with CBOR.
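+
+As a sketch of how several of these types flow through the JavaScript API, the following writes one record containing a BigInt, a Date, binary data, and an explicit Set. The `Sample` table name and field names are hypothetical:
+
+```javascript
+// Minimal sketch, assuming a hypothetical Sample table in the default database.
+const { Sample } = tables;
+
+export async function writeTypedRecord() {
+	await Sample.put({
+		id: 'sample-1',
+		active: true,                    // Boolean
+		count: 2n ** 60n,                // BigInt integer beyond 53 bits
+		recordedAt: new Date(),          // Date instance
+		payload: Buffer.from([1, 2, 3]), // binary data (Bytes)
+		tags: new Set(['a', 'b']),       // explicit Set
+	});
+}
+```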
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/dynamic-schema.md b/site/versioned_docs/version-4.3/technical-details/reference/dynamic-schema.md
new file mode 100644
index 00000000..57624117
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/dynamic-schema.md
@@ -0,0 +1,148 @@
+---
+title: Dynamic Schema
+---
+
+# Dynamic Schema
+
+When tables are created without any schema, through the operations API (without specifying attributes) or the Studio, the tables follow "dynamic schema" behavior. Generally it is best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity and precise control over indexing, without dependency on the data itself. However, it can often be simpler and quicker to create a table and let the data auto-generate the schema dynamically, with everything being auto-indexed for broad querying.
+
+With dynamic schemas, individual attributes are reflexively created as data is ingested, meaning the table will adapt to the structure of the data ingested. HarperDB tracks the metadata around schemas, tables, and attributes, allowing for describe table, describe schema, and describe all operations.
+
+### Databases
+
+HarperDB databases hold a collection of tables together in a single file that are transactionally connected. This means that operations across tables within a database can be performed in a single atomic transaction. By default tables are added to the default database called "data", but other databases can be created and specified for tables.
+
+### Tables
+
+HarperDB tables group records together with a common data pattern. To create a table, users must provide a table name and a primary key.
+
+* **Table Name**: Used to identify the table.
+* **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in the HarperDB operations API.
+
+## Primary Key
+
+The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary key; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [HarperDB Storage Algorithm](./storage-algorithm) utilizes this value for indexing.
+
+**Standard Attributes**
+
+With tables that are using dynamic schemas, additional attributes are reflexively added via insert and update operations (in both SQL and NoSQL) when new attributes are included in the data structure provided to HarperDB. As a result, schemas are additive, meaning new attributes are created in the underlying storage algorithm as additional data structures are provided. HarperDB offers `create_attribute` and `drop_attribute` operations for users who prefer to manually define their data model independent of data ingestion. When new attributes are added to tables with existing data, the value of that new attribute will be assumed `null` for all existing records.
+
+**Audit Attributes**
+
+HarperDB automatically creates two audit attributes used on each record if the table is created without a schema.
+
+* `__createdtime__`: The time the record was created, in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+* `__updatedtime__`: The time the record was updated, in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+
+### Dynamic Schema Example
+
+To better understand the behavior, let’s take a look at an example. This example utilizes [HarperDB API operations](../../developers/operations-api/databases-and-tables).
+
+**Create a Database**
+
+```json
+{
+	"operation": "create_database",
+	"database": "dev"
+}
+```
+
+**Create a Table**
+
+Notice the database name, table name, and primary key name are the only required parameters.
+
+```json
+{
+	"operation": "create_table",
+	"database": "dev",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+At this point the table does not have structure beyond what we provided, so the table looks like this:
+
+**dev.dog**
+
+![](/img/v4.3/reference/dynamic_schema_2_create_table.png.webp)
+
+**Insert Record**
+
+To define attributes we do not need to do anything beyond sending them in with an insert operation.
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{"id": 1, "dog_name": "Penny", "owner_name": "Kyle"}
+	]
+}
+```
+
+With a single record inserted and new attributes defined, our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.3/reference/dynamic_schema_3_insert_record.png.webp)
+
+Indexes have been automatically created for the `dog_name` and `owner_name` attributes.
+
+**Insert Additional Record**
+
+If we continue inserting records with the same data schema, no schema updates are required. One record will omit the hash attribute from the insert to demonstrate GUID generation.
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{"id": 2, "dog_name": "Monk", "owner_name": "Aron"},
+		{"dog_name": "Harper", "owner_name": "Stephen"}
+	]
+}
+```
+
+In this case, there is no change to the schema. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.3/reference/dynamic_schema_4_insert_additional_record.png.webp)
+
+**Update Existing Record**
+
+In this case, we will update a record with a new attribute not previously defined on the table.
+
+```json
+{
+	"operation": "update",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{"id": 2, "weight_lbs": 35}
+	]
+}
+```
+
+Now we have a new attribute called `weight_lbs`. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.3/reference/dynamic_schema_5_update_existing_record.png.webp)
+
+**Query Table with SQL**
+
+Now if we query for all records where `weight_lbs` is `null`, we expect to get back two records.
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT * FROM dev.dog WHERE weight_lbs IS NULL"
+}
+```
+
+This results in the expected two records being returned.
+
+![](/img/v4.3/reference/dynamic_schema_6_query_table_with_sql.png.webp)
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/globals.md b/site/versioned_docs/version-4.3/technical-details/reference/globals.md
new file mode 100644
index 00000000..c615d1c5
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/globals.md
@@ -0,0 +1,236 @@
+---
+title: Globals
+---
+
+# Globals
+
+The primary way that JavaScript code can interact with HarperDB is through the global variables, which include several objects and classes that provide access to the tables, server hooks, and resources that HarperDB provides for building applications. As global variables, these can be directly accessed in any module.
+
+These global variables are also available through the `harperdb` module/package, which can provide better typing in TypeScript. To use this with your own directory, make sure you link the package to your current `harperdb` installation:
+
+```bash
+npm link harperdb
+```
+
+The `harperdb` package is automatically linked for all installed components. Once linked, if you are using ECMAScript module syntax you can import functions from `harperdb` like:
+
+```javascript
+import { tables, Resource } from 'harperdb';
+```
+
+Or if you are using CommonJS format for your modules:
+
+```javascript
+const { tables, Resource } = require('harperdb');
+```
+
+The global variables include:
+
+## `tables`
+
+This is an object that holds all the tables for the default database (called `data`) as properties. Each of these property values is a table class that subclasses the Resource interface and provides access to the table through the Resource interface. For example, you can get a record from a table (in the default database) called 'my-table' with:
+
+```javascript
+import { tables } from 'harperdb';
+const { MyTable } = tables;
+async function getRecord() {
+	let record = await MyTable.get(recordId);
+}
+```
+
+It is recommended that you [define a database](../../getting-started/) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object.
+
+## `databases`
+
+This is an object that holds all the databases in HarperDB, and can be used to explicitly access a table by database name. Each database will be a property on this object, and each of these property values will be an object with the set of all tables in that database. The default database, `databases.data`, should equal the `tables` export. For example, if you want to access the "dog" table in the "dev" database, you could do so with:
+
+```javascript
+import { databases } from 'harperdb';
+const { Dog } = databases.dev;
+```
+
+## `Resource`
+
+This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](./resource) for more details about implementing a Resource class.
+
+## `auth(username, password?): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. If a password is provided, the password will be verified before returning the user object (if the password is incorrect, an error will be thrown).
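+
+For example, a minimal sketch of verifying credentials with `auth` (the wrapper function name is illustrative):
+
+```javascript
+// Minimal sketch using the global auth() function; credentials are placeholders.
+async function verifyUser(username, password) {
+	try {
+		return await auth(username, password); // throws if the password is incorrect
+	} catch (error) {
+		return null; // authentication failed
+	}
+}
+```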
+
+## `logger`
+
+This provides the methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/standard-logging) for more information.
+
+## `server`
+
+The `server` global object provides a number of functions and objects to interact with Harper's HTTP service.
+
+### `server.http(listener: RequestListener, options: HttpOptions): HttpServer[]`
+
+Alias: `server.request`
+
+Add a handler method to the HTTP server request listener middleware chain.
+
+Returns an array of server instances based on the specified `options.port` and `options.securePort`.
+
+Example:
+
+```js
+server.http((request, next) => {
+	return request.url === '/graphql'
+		? handleGraphQLRequest(request)
+		: next(request);
+}, {
+	runFirst: true, // run this handler first
+});
+```
+
+#### `RequestListener`
+
+Type: `(request: Request, next: RequestListener) => Promise`
+
+The HTTP request listener to be added to the middleware chain. To continue chain execution, pass the `request` to the `next` function, such as `return next(request);`.
+
+#### `Request`
+
+An implementation of the WHATWG [Request](https://developer.mozilla.org/en-US/docs/Web/API/Request) class.
+
+#### `Response`
+
+An implementation of the WHATWG [Response](https://developer.mozilla.org/en-US/docs/Web/API/Response) class.
+
+#### `HttpOptions`
+
+Type: `Object`
+
+Properties:
+
+- `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false`
+- `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+- `securePort` - _optional_ - `number` - Specify which HTTPS server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+#### `HttpServer`
+
+Node.js [`http.Server`](https://nodejs.org/api/http.html#class-httpserver) or [`https.Server`](https://nodejs.org/api/https.html#class-httpsserver) instance.
+
+### `server.socket(listener: ConnectionListener, options: SocketOptions): SocketServer`
+
+Creates a socket server on the specified `options.port` or `options.securePort`.
+
+Only one socket server will be created. A `securePort` takes precedence.
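+
+Example (a minimal sketch; the echo behavior and port number are illustrative):
+
+```js
+// Minimal sketch: a raw TCP echo service on a hypothetical extra port.
+server.socket((socket) => {
+	socket.on('data', (chunk) => socket.write(chunk)); // echo bytes back
+}, { port: 9930 });
+```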
+
+#### `ConnectionListener`
+
+Node.js socket server connection listener as documented in [`net.createServer`](https://nodejs.org/api/net.html#netcreateserveroptions-connectionlistener) or [`tls.createServer`](https://nodejs.org/api/tls.html#tlscreateserveroptions-secureconnectionlistener)
+
+#### `SocketOptions`
+
+- `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance.
+- `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+#### `SocketServer`
+
+Node.js [`net.Server`](https://nodejs.org/api/net.html#class-netserver) or [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+### `server.ws(listener: WsListener, options: WsOptions): HttpServer[]`
+
+Add a listener to the WebSocket connection listener middleware chain. The WebSocket server is associated with the HTTP server specified by the `options.port` or `options.securePort`. Use the [`server.upgrade()`](#serverupgradelistener-upgradelistener-options-upgradeoptions-void) method to add a listener to the upgrade middleware chain.
+
+Example:
+
+```js
+server.ws((ws, request, chainCompletion) => {
+	chainCompletion.then(() => {
+		ws.on('error', console.error);
+
+		ws.on('message', function message(data) {
+			console.log('received: %s', data);
+		});
+
+		ws.send('something');
+	});
+});
+```
+
+#### `WsListener`
+
+Type: `(ws: WebSocket, request: Request, chainCompletion: ChainCompletion, next: WsListener): Promise`
+
+The WebSocket connection listener.
+
+- The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module.
+- The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server.
+- The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket.
+- The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one, such as `next(ws, request, chainCompletion)`
+
+#### `WsOptions`
+
+Type: `Object`
+
+Properties:
+
+- `maxPayload` - _optional_ - `number` - Set the max payload size for the WebSocket server. Defaults to 100 MB.
+- `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false`
+- `port` - _optional_ - `number` - Specify which WebSocket server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+- `securePort` - _optional_ - `number` - Specify which WebSocket secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.upgrade(listener: UpgradeListener, options: UpgradeOptions): void`
+
+Add a listener to the HTTP Server [upgrade](https://nodejs.org/api/http.html#event-upgrade_1) event. If a WebSocket connection listener is added using [`server.ws()`](#serverwslistener-wslistener-options-wsoptions-httpserver), a default upgrade handler will be added as well. The default upgrade handler will add a `__harperdb_request_upgraded` boolean to the `request` argument to signal that the connection has already been upgraded. It will also check for this boolean _before_ upgrading, and if it is `true`, it will pass the arguments along to the `next` listener.
+
+This method should be used to delegate HTTP upgrade events to an external WebSocket server instance.
+
+Example:
+
+> This example is from the HarperDB Next.js component. See the complete source code [here](https://github.com/HarperDB/nextjs/blob/main/extension.js)
+
+```js
+server.upgrade(
+	(request, socket, head, next) => {
+		if (request.url === '/_next/webpack-hmr') {
+			return upgradeHandler(request, socket, head).then(() => {
+				request.__harperdb_request_upgraded = true;
+
+				next(request, socket, head);
+			});
+		}
+
+		return next(request, socket, head);
+	},
+	{ runFirst: true }
+);
+```
+
+#### `UpgradeListener`
+
+Type: `(request, socket, head, next) => void`
+
+The arguments are passed to the middleware chain from the HTTP server [`'upgrade'`](https://nodejs.org/api/http.html#event-upgrade_1) event.
+
+#### `UpgradeOptions`
+
+Type: `Object`
+
+Properties:
+
+- `runFirst` - _optional_ - `boolean` - Add listener to the front of the middleware chain. Defaults to `false`
+- `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+- `securePort` - _optional_ - `number` - Specify which HTTP secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.config`
+
+This provides access to the HarperDB configuration object. This comes from the [harperdb-config.yaml](../../deployments/configuration) file (parsed into object form).
+
+### `server.recordAnalytics(value, metric, path?, method?, type?)`
+
+This records the provided value as a metric in HarperDB's analytics. HarperDB efficiently records and tracks these metrics and makes them available through the [analytics API](./analytics). The values are aggregated, and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics. For the parameters, make sure you are not grouping at too fine a level for useful aggregation. The parameters are:
+
+* `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example.
+* `metric` - This is the name of the metric.
+* `path` - This is an optional path (like a URL path). For a URL like /my-resource/, you would typically include a path of "my-resource", not including the id, so you can group all the requests to "my-resource" instead of aggregating by each individual id.
+* `method` - Optional method to group by.
+* `type` - Optional type to group by.
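+
+As a sketch, recording a timing metric around a lookup might look like this (the table, metric name, path, and method values are all illustrative):
+
+```js
+// Minimal sketch: record how long a lookup took, grouped by resource and method.
+async function timedLookup(id) {
+	const start = performance.now();
+	const record = await tables.MyResource.get(id);
+	server.recordAnalytics(
+		performance.now() - start, // value: duration in milliseconds
+		'lookup-time',             // metric name
+		'my-resource',             // path: group by resource, not by individual id
+		'GET'                      // method to group by
+	);
+	return record;
+}
+```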
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/headers.md b/site/versioned_docs/version-4.3/technical-details/reference/headers.md
new file mode 100644
index 00000000..c58bb7ec
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/headers.md
@@ -0,0 +1,12 @@
+---
+title: HarperDB Headers
+---
+
+# HarperDB Headers
+
+All HarperDB API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all HarperDB API responses:
+
+| Key           | Example Value    | Description                                                                                                                                                |
+|---------------|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| server-timing | db;dur=7.165     | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools.   |
+| content-type  | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header.                           |
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/index.md b/site/versioned_docs/version-4.3/technical-details/reference/index.md
new file mode 100644
index 00000000..e9a6ebf9
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/index.md
@@ -0,0 +1,16 @@
+---
+title: Reference
+---
+
+# Reference
+
+This section contains technical details and reference materials for HarperDB.
+
+* [Resource API](./resource)
+* [Transactions](./transactions)
+* [Storage Algorithm](./storage-algorithm)
+* [Dynamic Schema](./dynamic-schema)
+* [Headers](./headers)
+* [Limitations](./limits)
+* [Content Types](./content-types)
+* [Data Types](./data-types)
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/limits.md b/site/versioned_docs/version-4.3/technical-details/reference/limits.md
new file mode 100644
index 00000000..ccad9d64
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/limits.md
@@ -0,0 +1,33 @@
+---
+title: HarperDB Limits
+---
+
+# HarperDB Limits
+
+This document outlines the limitations of HarperDB.
+
+## Database Naming Restrictions
+
+**Case Sensitivity**
+
+HarperDB database metadata (database names, table names, and attribute/column names) are case sensitive, meaning databases, tables, and attributes can differ only by the case of their characters.
+
+**Restrictions on Database Metadata Names**
+
+HarperDB database metadata (database names, table names, and attribute names) cannot contain the following UTF-8 characters:
+
+```
+/`¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
+```
+
+Additionally, they cannot contain the first 31 non-printing characters. Spaces are allowed, but not recommended as best practice. The regular expression used to verify a name is valid is:
+
+```
+^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$
+```
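+
+For example, a quick sketch of checking a prospective name against this pattern (the name values are illustrative):
+
+```javascript
+// Minimal sketch: validate a name against the documented pattern.
+const VALID_NAME = /^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$/;
+
+console.log(VALID_NAME.test('dog_owners')); // true
+console.log(VALID_NAME.test('dog/owners')); // false: '/' (\x2F) is excluded
+```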
+
+## Table Limitations
+
+**Attribute Maximum**
+
+HarperDB limits the number of total indexed attributes across tables (including the primary key of each table) to 10,000 per database.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/resource.md b/site/versioned_docs/version-4.3/technical-details/reference/resource.md
new file mode 100644
index 00000000..d1bc89f1
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/resource.md
@@ -0,0 +1,660 @@
+---
+title: Resource Class
+---
+
+# Resource Class
+
+The Resource class is designed to provide a unified API for modeling different data resources within HarperDB. Database/table data can be accessed through the Resource API. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users.
+
+Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, that you can interact with through various methods or queries. Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. A Resource instance holds the primary key/identifier, context information, and any pending updates to the record, so any instance methods can act on the record and have full access to this information during execution. Therefore, there are distinct resource instances created for every record or query that is accessed, and the instance methods are used for interaction with the data.
+
+Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. The general rule for how to interact with resources is:
+* If you want to *act upon* a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`.
+  * You can subsequently use the instance methods on the returned resource instance to perform additional actions on the record.
+* If you want to *define custom behavior* for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods.
+
+The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to and be used in a similar way to how you would interact with a RESTful API.
+
+The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs, which were designed with single-server interactions in mind; semantics that attempt to guarantee no-existing-record or overwrite-only behavior require locks that don't scale well in a distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed, eventually consistent database. You can generally think of CRUD operations mapping to REST operations like this:
+* Read - `get`
+* Create with a known primary key - `put`
+* Create with a generated primary key - `post`/`create`
+* Update (Full) - `put`
+* Update (Partial) - `patch`
+* Delete - `delete`
+
+The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URLs, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped to a Resource instance where the instance's ID will be `3492`, and interactions will use the instance methods like `get()`, `put()`, and `post()`. Using the root path (`/MyResource/`) will map to a Resource instance with an ID of `null`, and this represents the collection of all the records in the resource or table.
+
+You can create classes that extend `Resource` to define your own data sources, typically to interface with external data sources (the `Resource` base class is available as a global variable in the HarperDB JS environment). In doing this, you will generally be extending and providing implementations for the instance methods below.
For example:
+
+```javascript
+export class MyExternalData extends Resource {
+	async get() {
+		// fetch data from an external source, using our id
+		let response = await this.fetch(this.id);
+		// do something with the response
+	}
+	put(data) {
+		// send the data into the external source
+	}
+	delete() {
+		// delete an entity in the external data source
+	}
+	subscribe(options) {
+		// if the external data source is capable of real-time notification of changes, we can subscribe
+	}
+}
+// we can export this class from resources.json as our own endpoint, or use this as the source for
+// a HarperDB table to store and cache the data coming from this data source:
+tables.MyCache.sourcedFrom(MyExternalData);
+```
+
+You can also extend table classes in the same way, overriding the instance methods for custom functionality. The `tables` object is a global variable in the HarperDB JavaScript environment, along with `Resource`:
+
+```javascript
+export class MyTable extends tables.MyTable {
+	get() {
+		// we can add properties or change properties before returning data:
+		this.newProperty = 'newValue';
+		this.existingProperty = 44;
+		return super.get(); // returns the record, modified with the changes above
+	}
+	put(data) {
+		// can change data any way we want
+		super.put(data);
+	}
+	delete() {
+		super.delete();
+	}
+	post(data) {
+		// providing a post handler (for HTTP POST requests) is a common way to create additional
+		// actions that aren't well described with just PUT or DELETE
+	}
+}
+```
+Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice.
+
+## Global Variables
+
+### `tables`
+
+This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API.
+
+### `databases`
+
+This is an object with all the databases that have been defined in HarperDB (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.
+
+### `Resource`
+
+This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables.
+
+### `server`
+
+This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../../developers/components/writing-extensions).
+
+### `transaction`
+
+This provides a function for starting transactions. See the transactions section below for more information.
+
+### `contentTypes`
+
+This provides an interface for defining new content type handlers. See the [content type extensions documentation](../../developers/components/writing-extensions) for more information.
+
+### TypeScript Support
+
+While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them.
+This can be done by setting up a package link to the main HarperDB package in your app:
+
+```bash
+# you may need to go to your harperdb directory and set it up as a link first
+npm link harperdb
+```
+
+And then you can import any of the main HarperDB APIs you will use, and your IDE should understand the full typings associated with them:
+
+```javascript
+import { databases, tables, Resource } from 'harperdb';
+```
+
+## Resource Class (Instance) Methods
+
+### Properties/attributes declared in schema
+
+Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances.
+
+### `get(queryOrProperty?)`: Resource|AsyncIterable
+
+This is called to return the record or data for this resource, and is called by HTTP GET requests. This may optionally be called with a `query` object to specify that a query should be performed, or a string to indicate that the specified property value should be returned. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. The default `get` method (`super.get()`) returns the current record as a plain object.
+
+The query object can be used to access any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
+
+```javascript
+get(query) {
+	// note that query will only exist (as an object) if there is a query string
+	let param1 = query?.get?.('param1'); // returns 'value'
+	let id = this.getId(); // returns 'some-id'
+	...
+}
+```
+If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return `this` instance of the resource. If `get` is called on a collection (`/Table/?name=value`), the default action is to `search` and return an AsyncIterable of results.
+
+It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.get(query)` performs a `get` on this specific record/resource, not on the whole table. If you wish to access a _different_ record, you should use the static `get` method on the table class, like `Table.get(otherId, context)`.
+
+### `search(query: Query)`: AsyncIterable
+
+This performs a query on this resource, searching for records that are descendants. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`) it searches through all records in the table. However, if you call search from an instance with a specific ID like `1` from a path like `Table/1`, it will only return records that are descendants of that record, like `[1, 1]` (path of `Table/1/1`) and `[1, 2]` (path of `Table/1/2`). If you want to do a standard search of the table, make sure you call the static method like `Table.search(...)`. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an AsyncIterable of results. The query object can be used to specify the desired query.
+
+### `getId(): string|number|Array`
+
+Returns the primary key value for this resource.
+
+### `put(data: object, query?: Query)`
+
+This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated.
+The default `put` method on tables (`super.put(data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance.
+
+It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.put(data)` updates this specific record/resource, not other records in the table. If you wish to update a _different_ record, you should use the static `put` method on the table class, like `Table.put(data, context)`.
+
+The `query` argument is used to represent any additional query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
+
+```javascript
+put(data, query) {
+	let param1 = query?.get?.('param1'); // returns 'value'
+	...
+}
+```
+
+### `patch(data: object, query?: Query)`
+
+This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `query` argument is used to represent any additional query parameters that were included.
+
+### `update(data: object, fullUpdate: boolean?)`
+
+This is called by the default `put` and `patch` handlers to update a record. `put` calls it with `fullUpdate` as `true` to indicate a full record replacement (`patch` calls it with the second argument as `false`). Any additional property changes that are made before the transaction commits will also be persisted.
+
+### `delete(queryOrProperty?)`
+
+This will delete this record or resource, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete()`) deletes the record from the table as part of the current transaction.
+
+### `publish(message)`
+
+This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic.
+
+### `post(data: object, query?: Query)`
+
+This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `query` argument is used to represent any additional query parameters that were included.
+
+### `invalidate()`
+
+This method is available on tables. This will invalidate the current record in the table. This can be used with a caching table and is used to indicate that the source data has changed, meaning the record needs to be reloaded when next accessed.
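+
+For example, a minimal sketch of explicitly invalidating a cache entry when an external system reports that the source data changed (the `ProductCache` table and the notification hook are illustrative, not part of the API):
+
+```javascript
+const { ProductCache } = tables; // hypothetical caching table
+
+// called (out of band) when we learn that a source record has changed
+async function onSourceChanged(productId) {
+	let cached = await ProductCache.get(productId);
+	if (cached) await cached.invalidate(); // record will be reloaded from the source on next access
+}
+```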
+
+### `subscribe(subscriptionRequest: SubscriptionRequest): Promise`
+
+This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource.
+
+The returned Subscription object (which the promise resolves to) is an `AsyncIterable` that you can iterate through with a `for await` loop. It also has a `queue` property which holds an array of any messages that are ready to be delivered immediately (if you have specified a start time or previous count, or there is a message for the current or "retained" record, these may be immediately returned).
+
+The `SubscriptionRequest` object supports the following properties (all optional):
+
+* `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id: 'sub', includeDescendants: true}` would return events for any update with an id/topic of the form `sub/*` (like `sub/1`).
+* `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
+* `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`.
+* `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.
+
+### `connect(incomingMessages?: AsyncIterable, query?: Query): AsyncIterable`
+
+This is called when a connection is received through WebSockets or Server-Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client.
+
+### `set(property, value)`
+
+This will assign the provided value to the designated property in the resource's record. During a write operation, this will indicate that the record has changed and the changes will be saved during commit. During a read operation, this will modify the copy of the record that will be serialized (converted to the output format of JSON, MessagePack, etc.).
+
+### `allowCreate(user)`
+
+This is called to determine if the user has permission to create the current resource. This is called as part of external incoming requests (HTTP). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's insert permission for the table.
+
+### `allowRead(user)`
+
+This is called to determine if the user has permission to read from the current resource. This is called as part of external incoming requests (HTTP GET). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's read permission for the table.
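+
+These checks can be overridden to implement custom authorization logic. For example, a minimal sketch that opens reads to any user while limiting record creation to an administrative role (the `PublicData` table and the role-name check are assumptions; adapt them to your own role model):
+
+```javascript
+export class PublicData extends tables.PublicData {
+	allowRead(user) {
+		return true; // anyone may read
+	}
+	allowCreate(user) {
+		// assumption: the role object exposes a `role` name property
+		return user?.role?.role === 'admin';
+	}
+}
+```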
+
+### `allowUpdate(user)`
+
+This is called to determine if the user has permission to update the current resource. This is called as part of external incoming requests (HTTP PUT). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's update permission for the table.
+
+### `allowDelete(user)`
+
+This is called to determine if the user has permission to delete the current resource. This is called as part of external incoming requests (HTTP DELETE). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's delete permission for the table.
+
+### `addTo(property, value)`
+
+This adds the provided value to the specified property using conflict-free replicated data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values.
+
+### `getUpdatedTime(): number`
+
+This returns the last updated time of the resource (timestamp of last commit). This is returned as milliseconds from epoch.
+
+### `wasLoadedFromSource(): boolean`
+
+Indicates if the record had been loaded from the source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or is waiting on an in-flight request from the source to finish).
+
+### `getContext(): Context`
+
+Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action.
+
+#### `Context`
+
+The `Context` object has the following (potential) properties:
+
+* `user` - This is the user object, which includes information about the username, role, and authorizations.
+* `transaction` - The current transaction.
+
+If the current method was triggered by an HTTP request, the following property is also available:
+
+* `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed, and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated.
+
+When a resource gets a request through HTTP, the request object is the context, which has the following properties:
+
+* `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string).
+* `method` - The method of the HTTP request.
+* `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
+* `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
+* `pathname` - This provides the path part of the URL (no query string).
+* `host` - This provides the host name of the request (from the `Host` header).
+* `ip` - This provides the IP address of the client that made the request.
+* `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body.
+* `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body, like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body, like `DELETE`.)
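+
+For example, a handler could consult a request header and set a response header through the context (a sketch; the table, header names, and caching policy are illustrative):
+
+```javascript
+export class Report extends tables.Report {
+	get(query) {
+		let context = this.getContext();
+		let language = context.headers.get('Accept-Language'); // read a request header
+		context.responseHeaders.set('Cache-Control', 'max-age=60'); // set a response header
+		return super.get(query);
+	}
+}
+```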
+
+When a resource is accessed as a data source:
+
+* `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.
+
+### `operation(operationObject: Object, authorize?: boolean): Promise`
+
+This method is available on tables and will execute a HarperDB operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../developers/operations-api) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized).
+
+### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean`
+
+For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value.
+
+## Resource Static Methods and Properties
+
+The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way to interact with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on.
+
+The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a static `search()` method for specifically handling searching a table with query parameters. By default, the Resource static methods create an instance bound to the record specified by the arguments and call the corresponding instance methods.
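+
+For example, a quick sketch of the basic static methods against a hypothetical `Dog` table:
+
+```javascript
+const { Dog } = tables; // hypothetical table
+
+await Dog.put({ id: 1, name: 'Harper', breed: 'Labrador' }); // create or fully replace record 1
+let dog = await Dog.get(1); // retrieve a resource instance for record 1
+await Dog.patch(1, { breed: 'Golden Retriever' }); // partially update record 1
+await Dog.delete(1); // delete record 1
+```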
+
+Again, static methods are generally the preferred way to interact with resources and to call them from application code. These methods are available on all user Resource classes and tables.
+
+### `get(id: Id, context?: Resource|Context)`
+
+This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do:
+
+```javascript
+const { MyTable, Comment } = tables;
+...
+// in class:
+	async get() {
+		for (let commentId of this.commentIds) {
+			let comment = await Comment.get(commentId, this);
+			// now you can do something with the comment record
+		}
+	}
+```
+
+Type definition for `Id`:
+```typescript
+type Id = string | number | Array<string | number>;
+```
+
+### `get(query: Query, context?: Resource|Context)`
+
+This can be used to retrieve a resource instance by a query. The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`:
+```javascript
+MyTable.get({ id: 34, select: ['name', 'age'] });
+```
+This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above.
+
+### `put(id: Id, record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same `id` (primary key).
+
+### `put(record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same primary key provided in the record. If your record doesn't include the primary key attribute, you will need to use the form of this method with the `id` argument.
+Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `create(record: object, context?: Resource|Context): Promise`
+
+This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not include a primary key, and the generated primary key will be used. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
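+
+For example, a brief sketch of creating a record with a generated key (reusing the hypothetical `Comment` table from above):
+
+```javascript
+let comment = await Comment.create({ text: 'Nice post!', author: 'alice' });
+let newId = comment.getId(); // the generated primary key
+```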
+
+### `post(id: Id, data: object, context?: Resource|Context): Promise`
+### `post(data: object, context?: Resource|Context): Promise`
+
+This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged.
+
+### `patch(recordUpdate: object, context?: Resource|Context): Promise`
+### `patch(id: Id, recordUpdate: object, context?: Resource|Context): Promise`
+
+This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `delete(id: Id, context?: Resource|Context): Promise`
+
+Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `publish(message: object, context?: Resource|Context): Promise`
+### `publish(topic: Id, message: object, context?: Resource|Context): Promise`
+
+Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise`
+
+Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this.
+
+### `search(query: Query, context?: Resource|Context): AsyncIterable`
+
+This will perform a query on this table or collection. The query parameter can be used to specify the desired query.
+
+### `primaryKey`
+
+This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example:
+
+```javascript
+let record34 = await Table.get(34);
+record34[Table.primaryKey]; // 34
+```
+
+There are additional methods that are only available on table classes (which are a type of resource).
+
+### `Table.sourcedFrom(Resource, options)`
+
+This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`), and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries, and supports:
+* `expiration` - Default expiration time for records in seconds.
+* `eviction` - Eviction time for records in seconds.
+* `scanInterval` - Time period for scanning the table for records to evict.
+
+If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (this can eliminate or reduce the need for time-based expiration of data).
+
+### `parsePath(path, context, query)`
+
+This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will convert a multi-segment path to a multipart id (an array), which facilitates hierarchical id-based data access, and also parses `.property` suffixes for accessing properties and specifying the preferred content type in the URL. However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` to preserve the path directly as the id:
+
+```javascript
+	static parsePath(path) {
+		return path; // return the path as the id
+	}
+```
+
+### `isCollection(resource: Resource): boolean`
+This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity.
+
+### Context and Transactions
+
+Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation.
+
+This also allows timestamps that are accessed during resolution to be used to determine the overall last updated timestamp, which informs the response's header timestamps (facilitating accurate client-side caching). The context also maintains user, session, and request metadata information that is communicated so that contextual request information (like headers) can be accessed, any writes are properly attributed to the correct user, and any additional security checks can be applied to the user.
+
+When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the source argument (second argument) to the static methods.
+
+For example, suppose we have a method to post a comment on a blog post, and when this happens we want to add the comment to a separate comment table and also update an array of comment IDs on the blog record. We might do this:
+
+```javascript
+const { Comment } = tables;
+
+export class BlogPost extends tables.BlogPost {
+	post(comment) {
+		// add a comment record to the comment table, using this resource as the source for the context
+		Comment.put(comment, this);
+		this.comments.push(comment.id); // add the id for the record to our array of comment ids
+		// Both of these actions will be committed atomically as part of the same transaction
+	}
+}
+```
+
+Please see the [transaction documentation](./transactions) for more information on how transactions work in HarperDB.
+
+### Query
+
+The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional:
+
+#### `conditions`
+This is an array of objects that specify the conditions used to match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties:
+ * `attribute`: Name of the property/attribute to match on.
+ * `value`: The value to match.
+ * `comparator`: This can specify how the value is compared. This defaults to `equals`, but can also be `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `starts_with`, `contains`, `ends_with`, `between`, and `not_equal`.
+ * `conditions`: An array of conditions, which follows the same structure as above.
+ * `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`); this is optional and defaults to `and`.
+
+For example, a more complex query might look like:
+```javascript
+Table.search({ conditions: [
+	{ attribute: 'price', comparator: 'less_than', value: 100 },
+	{ operator: 'or', conditions: [
+		{ attribute: 'rating', comparator: 'greater_than', value: 4 },
+		{ attribute: 'featured', value: true }
+	]}
+]});
+```
+
+##### Chained Attributes/Properties
+Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../developers/applications/defining-schemas), see the [REST documentation](../../developers/rest) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references.
+For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:
+```javascript
+Product.search({ conditions: [
+	{ attribute: ['brand', 'name'], value: 'HarperDB' }
+]});
+```
+This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as in nested/grouped conditions. The chain of properties may also have more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables.
+An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in a referenced joined table.
+
+#### `operator`
+Specifies if the conditions should be applied as an `"and"` (records must match all conditions) or as an `"or"` (records must match at least one condition). This is optional and defaults to `"and"`.
+
+#### `limit`
+This specifies the limit of the number of records that should be returned from the query.
+
+#### `offset`
+This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records.
+
+#### `select`
+This specifies the specific properties that should be included in each record that is returned. This can be an array, to specify a set of properties that should be included in the returned objects. The array can specify a `select.asArray = true` property, and the query results will return a set of arrays of values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and a `select` array itself that specifies properties that should be returned by the referenced sub-object or related record. For example, a `select` can be defined:
+```javascript
+Table.search({ select: [ 'name', 'age' ], conditions: ...})
+```
+Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects:
+```javascript
+Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...})
+```
+The select properties can also include certain special properties:
+* `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
+* `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).
+
+Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects:
+```javascript
+Table.search({ select: 'id', conditions: ...})
+```
+
+#### `sort`
+This defines the sort order, and should be an object that can have the following properties:
+ * `attribute`: The attribute to sort on.
+ * `descending`: If true, will sort in descending order (optional and defaults to `false`).
+ * `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
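+
+For example, a sketch of a tie-breaking sort, assuming the `Product` table from earlier with `price` and `rating` attributes:
+
+```javascript
+// sort by price ascending, breaking ties by rating descending
+Product.search({
+	conditions: [{ attribute: 'price', comparator: 'less_than', value: 100 }],
+	sort: { attribute: 'price', next: { attribute: 'rating', descending: true } }
+});
+```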
+
+#### `explain`
+This will return the conditions re-ordered as HarperDB will execute them. HarperDB will estimate the number of matching records for each condition and apply the narrowest condition first.
+
+#### `enforceExecutionOrder`
+This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them.
+
+The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index).
+
+For example, we could do a query like:
+
+```javascript
+let { Product } = tables;
+let results = Product.search({
+	conditions: [
+		{ attribute: 'rating', value: 4.5, comparator: 'greater_than' },
+		{ attribute: 'price', value: 100, comparator: 'less_than' },
+	],
+	offset: 20,
+	limit: 10,
+	select: ['id', 'name', 'price', 'rating'],
+	sort: { attribute: 'price' }
+});
+for await (let record of results) {
+	// iterate through each record in the query results
+}
+```
+`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will result in a long-lived read transaction, which can degrade performance (including write performance) and may eventually be aborted.
+
+### Interacting with the Resource Data Model
+
+When extending or interacting with table resources, a resource instance is loaded with the record data from its table when it is retrieved and instantiated. You can interact with this record through the resource instance. For any properties that have been defined in the table's schema, you can directly access or modify properties through standard property syntax.
+For example, let's say we defined a product schema:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+}
+```
+
+If we have extended this table class with our own `get()`, we can interact with any of these specified attributes/properties:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let name = this.name; // this is the name of the current product
+		let rating = this.rating; // this is the rating of the current product
+		this.rating = 3; // we can also modify the rating for the current instance
+		// (with a get this won't be saved by default, but will be used when serialized)
+		return super.get(query);
+	}
+}
+```
+
+Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:
+
+```javascript
+let product1 = await Product.get(1);
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+product1.rating = 3; // modify the rating for this instance (this will not be saved without a call to update())
+```
+
+If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance. Since they aren't declared, there won't be getter/setter definitions for direct property access, but we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method:
+
+```javascript
+let product1 = await Product.get(1);
+let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+product1.set('newProperty', 'some value'); // we can assign any properties we want with set
+```
+
+And likewise, we can do this in an instance method, although you will probably want to use `super.get()`/`super.set()` so you don't have to write extra logic to avoid recursion:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+		super.set('newProperty', 'some value'); // we can assign any properties we want with set
+	}
+}
+```
+
+Note that you may also need to use `get`/`set` for properties that conflict with existing method names. For example, if your schema defines an attribute called `getId` (not recommended), you would need to access that property through `get('getId')` and `set('getId', value)`.
+
+If you want to save the changes you make, you can call the `update()` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.rating = 3;
+product1.set('newProperty', 'some value');
+product1.update(); // save both of these property changes
+```
+
+Updates are automatically saved inside modifying methods like `put` and `post`:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		this.name = data.name;
+		this.set('description', data.description);
+		// both of these changes will be saved automatically as this transaction commits
+	}
+}
+```
+
+We can also interact with properties in nested objects and arrays, following the same patterns.
+For example, we could define more complex types on our product:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+	brand: Brand
+	variations: [Variation]
+}
+type Brand {
+	name: String
+}
+type Variation {
+	name: String
+	price: Float
+}
+```
+
+We can interact with these nested properties:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		let brandName = this.brand.name;
+		let firstVariationPrice = this.variations[0].price;
+		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but we can still try to access the property
+		// make some changes
+		this.variations.splice(0, 1); // remove the first variation
+		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		this.brand.name = 'new brand name';
+		// all of these changes will be saved
+	}
+}
+```
+
+If you need to delete a property, you can do so with the `delete` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.delete('additionalInformation');
+product1.update();
+```
+
+You can also get a "plain" object representation of a resource instance by calling `toJSON`, which will return a simple frozen object with all the properties (whether or not defined in the schema) as direct normal properties (note that this object can *not* be modified; it is frozen since it belongs to a cache):
+
+```javascript
+let product1 = await Product.get(1);
+let plainObject = product1.toJSON();
+for (let key in plainObject) {
+	// we can iterate through the properties of this record
+}
+```
+
+## Response Object
+The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response`-style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. For example, you could return a redirect response like:
+
+```javascript
+return { status: 302, headers: { Location: '/new-location' } };
+```
+If you include a `body` property, it must be a string or buffer that will be returned as the response body. If you include a `data` property, it must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header:
+
+```javascript
+return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
+```
+
+### Throwing Errors
+
+You may throw errors (and leave them uncaught) from the response methods, and these should be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default, the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned.
+For example:
+
+```javascript
+if (notAuthorized()) {
+	let error = new Error('You are not authorized to access this');
+	error.statusCode = 403;
+	throw error;
+}
+```
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/storage-algorithm.md b/site/versioned_docs/version-4.3/technical-details/reference/storage-algorithm.md
new file mode 100644
index 00000000..f91ce006
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/storage-algorithm.md
@@ -0,0 +1,27 @@
+---
+title: Storage Algorithm
+---
+
+# Storage Algorithm
+
+The HarperDB storage algorithm is fundamental to the HarperDB core functionality, enabling the [Dynamic Schema](./dynamic-schema) and all other user-facing functionality. HarperDB is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document provides additional details on how data is stored within HarperDB.
+
+## Query Language Agnostic
+
+The HarperDB storage algorithm was designed to abstract the data storage from any individual query language. HarperDB currently supports both SQL and NoSQL on top of this storage algorithm, with the ability to add additional query languages in the future. This means data can be inserted via NoSQL and read via SQL while hitting the same underlying data storage.
+
+## ACID Compliant
+
+Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, HarperDB offers ACID compliance independently on each node. Readers and writers operate independently of each other, meaning readers don’t block writers and writers don’t block readers. Each HarperDB table has a single writer process, avoiding deadlocks and assuring that writes are executed in the order in which they were received. HarperDB tables can have multiple reader processes operating at the same time for consistent, high-scale reads.
+
+## Universally Indexed
+
+All top-level attributes are automatically indexed immediately upon ingestion. The [HarperDB Dynamic Schema](./dynamic-schema) reflexively creates both the attribute and the index as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
+
+## Additional LMDB Benefits
+
+HarperDB inherits both functional and performance benefits by implementing LMDB as the underlying key-value store. Data is memory-mapped, which enables quick data access without data duplication. All writers are fully serialized, making writes deadlock-free. LMDB is built to maximize operating system features and functionality, fully exploiting buffer cache and built to run in CPU cache. To learn more about LMDB, visit their documentation.
+
+## HarperDB Indexing Example (Single Table)
+
+![](/img/v4.3/reference/HarperDB-3.0-Storage-Algorithm.png.webp)
diff --git a/site/versioned_docs/version-4.3/technical-details/reference/transactions.md b/site/versioned_docs/version-4.3/technical-details/reference/transactions.md
new file mode 100644
index 00000000..8dbb70ca
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/reference/transactions.md
@@ -0,0 +1,40 @@
+---
+title: Transactions
+---
+
+# Transactions
+
+Transactions are an important part of robust handling of data in data-driven applications. HarperDB provides ACID-compliant support for transactions, allowing for guaranteed atomic, consistent, and isolated data handling within transactions, with durability guarantees on commit. Understanding how transactions are tracked and behave is important for properly leveraging transactional support in HarperDB. For most operations this is very intuitive: each HTTP request is executed in a transaction, so when multiple actions are executed in a single request, they are normally included in the same transaction automatically.
+
+Transactions span a database. Once a read snapshot is started, it is an atomic snapshot of all the tables in a database, and writes that span multiple tables in the database will all be committed atomically together (no writes in one table will be visible before writes in another table in the same database). If a transaction is used to access or write data in multiple databases, there will actually be a separate database transaction used for each database, and there is no guarantee of atomicity between separate transactions in separate databases. This can be an important consideration when deciding if and how tables should be organized into different databases.
+
+Because HarperDB is designed to be a low-latency distributed database, locks are avoided in data handling, so transactions do not lock data within the transaction. When a transaction starts, it will provide a read snapshot of the database for any retrievals or queries, which means all reads will be performed on a single version of the database, isolated from any other writes that are concurrently taking place. Within a transaction, all writes are aggregated and atomically written on commit. These writes are all isolated (from other transactions) until committed, and all become visible atomically. However, because transactions are non-locking, it is possible that writes from other transactions may occur between when reads are performed and when the writes are committed (at which point the last write will win for any records that have been written concurrently). Support for locks in transactions is planned for a future release.
+
+Transactions can also be explicitly started using the `transaction` global function that is provided in the HarperDB environment:
+
+## `transaction(context?, callback: (transaction) => any): Promise`
+
+This executes the callback in a transaction, providing a context that can be used for any resource methods that are called. This returns a promise for when the transaction has been committed. The callback itself may be asynchronous (return a promise), allowing for asynchronous activity within the transaction. This is useful for starting a transaction when your code is not already running within a transaction (in an HTTP request handler, a transaction will typically already be started).
+For example, if we wanted to run an action on a timer that periodically loads data, we could ensure that the data is loaded in single transactions like this (note that HarperDB is multi-threaded, and if we run a timer-based job, we very likely want it to only run in one thread):
+
+```javascript
+import { tables } from 'harperdb';
+import { isMainThread } from 'worker_threads';
+const { MyTable } = tables;
+if (isMainThread) // only on the main thread
+	setInterval(async () => {
+		let someData = await (await fetch(... some URL ...)).json();
+		transaction((txn) => {
+			for (let item of someData) {
+				MyTable.put(item, txn);
+			}
+		});
+	}, 3600000); // every hour
+```
+
+You can provide your own context object for the transaction to attach to. If you call `transaction` with a context that already has a transaction started, it will simply use the current transaction, execute the callback, and immediately return (this can be useful for ensuring that a transaction has started).
+
+Once the transaction callback is completed (for non-nested transaction calls), the transaction will commit, and if the callback throws an error, the transaction will abort. In addition, the callback is called with the `transaction` object, which provides the following methods and property:
+
+* `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves.
+* `abort(): void` - Aborts the current transaction and resets it.
+* `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database.
+* `timestamp: number` - This is the timestamp associated with the current transaction.
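+
+For example, a sketch of conditionally aborting a transaction with these methods (`MyTable`, `someId`, and the validation logic are illustrative):
+
+```javascript
+await transaction(async (txn) => {
+	let record = await MyTable.get(someId, txn);
+	if (!record) {
+		txn.abort(); // nothing to update; discard the transaction
+		return;
+	}
+	await MyTable.patch(someId, { verified: true }, txn);
+	// the transaction commits automatically when the callback completes
+});
+```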
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/End-of-Life.md b/site/versioned_docs/version-4.3/technical-details/release-notes/End-of-Life.md
new file mode 100644
index 00000000..ca15f713
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/End-of-Life.md
@@ -0,0 +1,14 @@
+---
+title: HarperDB Software Lifecycle Schedules
+---
+
+# HarperDB Software Lifecycle Schedules
+
+The lifecycle schedules below form a part of HarperDB’s Support Policies. They include Major Releases and Minor Releases that have reached their end of life date in the past 3 years.
+
+| **Release** | **Release Date** | **End of Life Date** |
+|-------------|------------------|----------------------|
+| 3.2 | 6/22 | 6/25 |
+| 3.3 | 9/22 | 9/25 |
+| 4.0 | 1/23 | 1/26 |
+| 4.1 | 4/23 | 4/26 |
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/index.md b/site/versioned_docs/version-4.3/technical-details/release-notes/index.md
new file mode 100644
index 00000000..b6d69205
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/index.md
@@ -0,0 +1,173 @@
+---
+title: Release Notes
+---
+
+# Release Notes
+
+### Current Release
+
+[Meet Tucker](./v4-tucker) Our 4th Release Pup
+
+[4.3.36 Tucker](./v4-tucker/4.3.36)
+
+[4.3.35 Tucker](./v4-tucker/4.3.35)
+
+[4.3.34 Tucker](./v4-tucker/4.3.34)
+
+[4.3.33 Tucker](./v4-tucker/4.3.33)
+
+[4.3.32 Tucker](./v4-tucker/4.3.32)
+
+[4.3.31 Tucker](./v4-tucker/4.3.31)
+
+[4.3.30 Tucker](./v4-tucker/4.3.30)
+
+[4.3.29 Tucker](./v4-tucker/4.3.29)
+
+[4.3.28 Tucker](./v4-tucker/4.3.28)
+
+[4.3.27 Tucker](./v4-tucker/4.3.27)
+
+[4.3.26 Tucker](./v4-tucker/4.3.26)
+
+[4.3.25 Tucker](./v4-tucker/4.3.25)
+
+[4.3.24 Tucker](./v4-tucker/4.3.24)
+
+[4.3.23 Tucker](./v4-tucker/4.3.23)
+
+[4.3.22 Tucker](./v4-tucker/4.3.22)
+
+[4.3.21 Tucker](./v4-tucker/4.3.21)
+
+[4.3.20 Tucker](./v4-tucker/4.3.20)
+
+[4.3.19 Tucker](./v4-tucker/4.3.19)
+
+[4.3.18 Tucker](./v4-tucker/4.3.18)
+
+[4.3.17 Tucker](./v4-tucker/4.3.17)
+
+[4.3.16 Tucker](./v4-tucker/4.3.16)
+
+[4.3.15 Tucker](./v4-tucker/4.3.15)
+
+[4.3.14 Tucker](./v4-tucker/4.3.14)
+
+[4.3.13 Tucker](./v4-tucker/4.3.13)
+
+[4.3.12 Tucker](./v4-tucker/4.3.12)
+
+[4.3.11 Tucker](./v4-tucker/4.3.11)
+
+[4.3.10 Tucker](./v4-tucker/4.3.10)
+
+[4.3.9 Tucker](./v4-tucker/4.3.9)
+
+[4.3.8 Tucker](./v4-tucker/4.3.8)
+
+[4.3.7 Tucker](./v4-tucker/4.3.7)
+
+[4.3.6 Tucker](./v4-tucker/4.3.6)
+
+[4.3.5 Tucker](./v4-tucker/4.3.5)
+
+[4.3.4 Tucker](./v4-tucker/4.3.4)
+
+[4.3.3 Tucker](./v4-tucker/4.3.3)
+
+[4.3.2 Tucker](./v4-tucker/4.3.2)
+
+[4.3.1 Tucker](./v4-tucker/4.3.1)
+
+[4.3.0 Tucker](./v4-tucker/4.3.0)
+
+[4.2.8 Tucker](./v4-tucker/4.2.8)
+
+[4.2.7 Tucker](./v4-tucker/4.2.7)
+
+[4.2.6 Tucker](./v4-tucker/4.2.6)
+
+[4.2.5 Tucker](./v4-tucker/4.2.5)
+
+[4.2.4 Tucker](./v4-tucker/4.2.4)
+
+[4.2.3 Tucker](./v4-tucker/4.2.3)
+
+[4.2.2 Tucker](./v4-tucker/4.2.2)
+
+[4.2.1 Tucker](./v4-tucker/4.2.1)
+
+[4.2.0 Tucker](./v4-tucker/4.2.0)
+
+[4.1.2 Tucker](./v4-tucker/4.1.2)
+
+[4.1.1 Tucker](./v4-tucker/4.1.1)
+
+[4.1.0 Tucker](./v4-tucker/4.1.0)
+
+[4.0.7 Tucker](./v4-tucker/4.0.7)
+
+[4.0.6 Tucker](./v4-tucker/4.0.6)
+
+[4.0.5 Tucker](./v4-tucker/4.0.5)
+
+[4.0.4 Tucker](./v4-tucker/4.0.4)
+
+[4.0.3 Tucker](./v4-tucker/4.0.3)
+
+[4.0.2 Tucker](./v4-tucker/4.0.2)
+
+[4.0.1 Tucker](./v4-tucker/4.0.1)
+
+[4.0.0 Tucker](./v4-tucker/4.0.0)
+
+### Past Releases
+
+[Meet Monkey](./v3-monkey) Our 3rd Release Pup
+
+[3.2.1 Monkey](./v3-monkey/3.2.1)
+
+[3.2.0 Monkey](./v3-monkey/3.2.0)
+
+[3.1.5 Monkey](./v3-monkey/3.1.5)
+
+[3.1.4 Monkey](./v3-monkey/3.1.4)
+
+[3.1.3 Monkey](./v3-monkey/3.1.3)
+
+[3.1.2 Monkey](./v3-monkey/3.1.2)
+
+[3.1.1 Monkey](./v3-monkey/3.1.1)
+
+[3.1.0 Monkey](./v3-monkey/3.1.0)
+
+[3.0.0 Monkey](./v3-monkey/3.0.0)
+
+***
+
+[Meet Penny](./v2-penny) Our 2nd Release Pup
+
+[2.3.1 Penny](./v2-penny/2.3.1)
+
+[2.3.0 Penny](./v2-penny/2.3.0)
+
+[2.2.3 Penny](./v2-penny/2.2.3)
+
+[2.2.2 Penny](./v2-penny/2.2.2)
+
+[2.2.0 Penny](./v2-penny/2.2.0)
+
+[2.1.1 Penny](./v2-penny/2.1.1)
+
+***
+
+[Meet Alby](./v1-alby) Our 1st Release Pup
+
+[1.3.1 Alby](./v1-alby/1.3.1)
+
+[1.3.0 Alby](./v1-alby/1.3.0)
+
+[1.2.0 Alby](./v1-alby/1.2.0)
+
+[1.1.0 Alby](./v1-alby/1.1.0)
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.1.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.1.0.md
new file mode 100644
index 00000000..b42514a2
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.1.0.md
@@ -0,0 +1,77 @@
+---
+title: 1.1.0
+sidebar_position: 89899
+---
+
+### HarperDB 1.1.0, Alby Release
+4/18/2018
+
+**Features**
+
+* Users & Roles:
+
+  * Limit/Assign access to all HarperDB operations
+
+  * Limit/Assign access to schemas, tables & attributes
+
+  * Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`)
+
+* Enhanced SQL parser
+
+  * Added extensive ANSI SQL Support.
+
+  * Added Array function, which allows for converting relational data into Object/Hierarchical data
+
+  * `Distinct_Array` Function: allows for removing duplicates in the Array function.
+
+  * Enhanced SQL Validation: Improved validation around the structure of SQL, validating the schema, etc.
+
+  * 10x performance improvement on SQL statements.
+
+* Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON.
+
+* Added upgrade function to CLI
+
+* Added ability to perform bulk update from CSV
+
+* Created landing page for HarperDB.
+
+* Added CORS support to HarperDB
+
+**Fixes**
+
+* Fixed memory leak in CSV bulk loads
+
+* Corrected error when attempting to perform a `SQL DELETE`
+
+* Added further validation to NoSQL `UPDATE` to validate schema & table exist
+
+* Fixed install issue where, if part of the install path did not exist, the install would silently fail.
+
+* Fixed issues with replicated data when one of the replicas is down
+
+* Removed logging of initial user’s credentials during install
+
+* Can now use reserved words as aliases in SQL
+
+* Removed users' passwords from results when calling `list_users`
+
+* Corrected forwarding of operations to other nodes in a cluster
+
+* Corrected lag in schema metadata passing to other nodes in a cluster
+
+* Drop table & drop schema now move the schema or table to the trash folder under the database folder for later permanent deletion.
+
+* Bulk inserts no longer halt the entire operation if n records already exist; instead, the response includes the hashes of records that were skipped.
+
+* Added ability to accept EULA from command line
+
+* Corrected `search_by_value` not searching on the correct attribute
+
+* Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to `config/settings.js`
+
+* Added error handling for errors resulting from SQL calculations.
+
+* Standardized error responses as JSON.
+
+* Corrected internal process generation to not allow more processes than the machine has cores.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.2.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.2.0.md
new file mode 100644
index 00000000..095bf239
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.2.0.md
@@ -0,0 +1,42 @@
+---
+title: 1.2.0
+sidebar_position: 89799
+---
+
+### HarperDB 1.2.0, Alby Release
+7/10/2018
+
+**Features**
+
+* Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time.
+* Geo: HarperDB has implemented turf.js into its SQL parser to enable geo-based analytics.
+* Jobs: CSV Data loads, Exports & Time to Live now all run as background jobs.
+* Exports: Perform queries that export into JSON or CSV and save to disk or S3.
+
+
+**Fixes**
+
+* Fixed issue where CSV data loads incorrectly reported the number of records loaded.
+* Added validation to stop `BETWEEN` operations in SQL.
+* Updated logging to not include internal variables in the logs.
+* Cleaned up `add_role` response to not include internal variables.
+* Removed old and unused dependencies.
+* Built out further unit tests and integration tests.
+* Fixed HTTPS to handle certificates properly.
+* Improved stability of clustering & replication.
+* Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` responses.
+* Fixed issue where Blob text was not being returned from `SQL SELECT`s.
+* Fixed error being returned when querying a table with no data; now correctly returns an empty array.
+* Improved performance in SQL when searching on exact values.
+* Fixed error when `./harperdb stop` is called.
+* Fixed logging issue causing instability in the installer.
+* Fixed `read_log` operation to accept date time.
+* Added permissions checking to `export_to_s3`.
+* Added ability to run SQL on `SELECT` without a `FROM`.
+* Fixed issue where updating a user’s password was not encrypting properly.
+* Fixed `user_guide.html` to point to the readme on the git repo.
+* Created option to have HarperDB run as a foreground process.
+* Updated `user_info` to return the correct role for a user.
+* Fixed issue where HarperDB would not stop if the database root was deleted.
+* Corrected error message on insert if an invalid schema is provided.
+* Added permissions checks for user & role operations.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.3.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.3.0.md
new file mode 100644
index 00000000..ad196159
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.3.0.md
@@ -0,0 +1,27 @@
+---
+title: 1.3.0
+sidebar_position: 89699
+---
+
+### HarperDB 1.3.0, Alby Release
+11/2/2018
+
+**Features**
+
+* Upgrade: Upgrade to the newest version via command line.
+* SQL Support: Added `IS NULL` for the SQL parser.
+* Added attribute validation to search operations.
+
+
+**Fixes**
+
+* Fixed `SELECT` calculations, e.g. `SELECT 2+2`.
+* Fixed `SELECT` with `OR` not returning expected results.
+* No longer allowing reserved words for schema and table names.
+* Corrected process interruptions from improper SQL statements.
+* Improved message handling between spawned processes that replace killed processes.
+* Enhanced error handling for updates to tables that do not exist.
+* Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes.
+* Fixed issue with new columns not being updated properly in update statements.
+* Now validating roles, tables and attributes when creating or updating roles.
+* Fixed an issue where in some cases `undefined` was being returned after dropping a role.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.3.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.3.1.md
new file mode 100644
index 00000000..77e3ffe4
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/1.3.1.md
@@ -0,0 +1,29 @@
+---
+title: 1.3.1
+sidebar_position: 89698
+---
+
+### HarperDB 1.3.1, Alby Release
+2/26/2019
+
+**Features**
+
+* Clustering connection direction appointment
+* Foundations for threading/multiprocessing
+* UUID autogeneration for hash attributes that were not provided
+* Added cluster status operation
+
+
+**Bug Fixes and Enhancements**
+
+* More logging
+* Clustering communication enhancements
+* Clustering queue ordering by timestamps
+* Cluster reconnection enhancements
+* Number of system core(s) detection
+* Node LTS (10.15) compatibility
+* Update/Alter users enhancements
+* General performance enhancements
+* A warning is logged if different versions of HarperDB are connected via clustering
+* Fixed need to restart after user creation/alteration
+* Fixed SQL error that occurred on selecting from an empty table
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/_category_.json b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/_category_.json
new file mode 100644
index 00000000..e33195ec
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Alby (Version 1)",
+  "position": -1
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/index.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/index.md
new file mode 100644
index 00000000..a813fc17
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v1-alby/index.md
@@ -0,0 +1,13 @@
+---
+title: HarperDB Alby (Version 1)
+---
+
+# HarperDB Alby (Version 1)
+
+Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup.
+
+Here is a bit about Alby:
+
+![picture of black dog](/img/v4.3/dogs/alby.webp)
+
+_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sunbathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.1.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.1.1.md
new file mode 100644
index 00000000..e1314a5f
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.1.1.md
@@ -0,0 +1,27 @@
+---
+title: 2.1.1
+sidebar_position: 79898
+---
+
+### HarperDB 2.1.1, Penny Release
+05/22/2020
+
+**Highlights**
+
+* CORE-1007 Added the ability to perform SQL `INSERT` & `UPDATE` with function calls & expressions on values.
+* CORE-1023 Fixed minor bug in the final SQL step incorrectly trying to translate ordinals to aliases in `ORDER BY` statements.
+* CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values.
+* CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas.
+* CORE-1005 Implemented the JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function (see the sketch after these highlights).
+* CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. The same rules now apply to hash attribute values.
+* CORE-1003 Fixed handling of `ORDER BY` statements with function aliases.
+* CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name.
+* CORE-996 Fixed an issue where the `transact_to_cluster` flag is lost for CSV URL loads; fixed an issue where new attributes created in CSV bulk load do not sync to the cluster.
+* CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, CPU, disk, and network.
+* CORE-993 Added new custom date functions for AlaSQL & UTC updates.
+* CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting the main HarperDB process.
+* CORE-992 HTTPS enabled by default.
+* CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage.
+* CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime.
+* CORE-975 Updated time values set by SQL date functions to be in epoch format.
+* CORE-974 Added date functions to SQL `SELECT` column alias functionality.
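+
+A minimal, hedged sketch of the new `SEARCH_JSON` function (CORE-1005), called through the Operations API's `sql` operation; the `dev.dog` table, its `owner` object attribute, and the simple JSONata expression `'name'` are illustrative assumptions, not from these notes:
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT id, SEARCH_JSON('name', owner) FROM dev.dog"
+}
+```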
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.0.md
new file mode 100644
index 00000000..267168cd
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.0.md
@@ -0,0 +1,43 @@
+---
+title: 2.2.0
+sidebar_position: 79799
+---
+
+### HarperDB 2.2.0, Penny Release
+08/24/2020
+
+**Features/Updates**
+
+* CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to consume fewer resources
+* CORE-1018 Adds SQL functionality for `BETWEEN` statements
+* CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation
+* CORE-1036 On create/drop table we auto create/drop the related transaction environments for the schema.table
+* CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations
+* CORE-1057 Implemented write transactions into LMDB create/update/delete functions
+* CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards
+* CORE-1059 Added config setting to disable transaction logging for an instance
+* CORE-1076 Adds permissions filter to describe operations
+* CORE-1043 Change clustering catchup to use the new transaction log
+* CORE-1052 Removed word "master" from source
+* CORE-1061 Added new operation called `delete_transactions_before`; this will trim the transaction log for a specific schema/table
+* CORE-1040 On HarperDB startup make sure all tables have a transaction environment
+* CORE-1055 Added two new settings to change the server `headersTimeout` & `keepAliveTimeout` from the config file
+* CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value` (see the sketch below)
+* CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count
+* CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL.
+* CORE-1054 Updates permissions object to remove the delete attribute permission and update the table attribute permission key to `attribute_permissions`
+* CORE-1092 Do not allow the `__createdtime__` to be updated
+* CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require the super user role and adds integration tests to validate
+* CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found
+* CORE-1049 Updates response message for SQL update op with no matching rows
+* CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user.
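+
+A hedged sketch of the new `read_transaction_log` operation (CORE-1044); the schema/table names and timestamp values are illustrative assumptions:
+
+```json
+{
+  "operation": "read_transaction_log",
+  "schema": "dev",
+  "table": "dog",
+  "search_type": "timestamp",
+  "search_values": [1598290235769, 1598290235770]
+}
+```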
+
+**Bug Fixes**
+
+* CORE-1028 Fixes bug for simple SQL `SELECT` queries not returning aliases and incorrectly returning hash values when not requested in the query
+* CORE-1037 Fixed an issue where numbers with a leading zero, e.g. 00123, were converted to numbers rather than being honored as strings.
+* CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo
+* CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped.
+* CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records.
+* CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in the `WHERE` clause
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.2.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.2.md
new file mode 100644
index 00000000..827c63db
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.2.md
@@ -0,0 +1,16 @@
+---
+title: 2.2.2
+sidebar_position: 79797
+---
+
+### HarperDB 2.2.2, Penny Release
+10/27/2020
+
+* CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
+* CORE-1153 Fixed issue where `delete_files_before` was writing to the transaction log.
+* CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created.
+* CORE-1112 Adds handling for system timestamp attributes in permissions.
+* CORE-1131 Adds better handling for checking perms on operations with an action value in JSON.
+* CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions.
+* CORE-1135 Adds validation for valid keys in role API operations.
+* CORE-1073 Adds new `import_from_s3` operation to the API.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.3.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.3.md
new file mode 100644
index 00000000..eca953e2
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.2.3.md
@@ -0,0 +1,9 @@
+---
+title: 2.2.3
+sidebar_position: 79796
+---
+
+### HarperDB 2.2.3, Penny Release
+11/16/2020
+
+* CORE-1158 Performance improvements to the core delete function and configuration of `delete_files_before` to run in batches with a pause in between.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.3.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.3.0.md
new file mode 100644
index 00000000..2b248490
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.3.0.md
@@ -0,0 +1,22 @@
+---
+title: 2.3.0
+sidebar_position: 79699
+---
+
+### HarperDB 2.3.0, Penny Release
+12/03/2020
+
+**Features/Updates**
+
+* CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (see documentation for more information)
+* CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation (see the sketch at the end of these notes)
+* CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings.
+* CORE-1175 Added new internal LMDB function to copy an environment for use in future features.
+* CORE-1166 Updated packages to address security vulnerabilities.
+
+**Bug Fixes**
+
+* CORE-1195 Modified `drop_attribute` to drop after the data cleanse completes.
+* CORE-1149 Fix SQL bug regarding self-joins and updates AlaSQL to the 0.6.5 release.
+* CORE-1168 Fix inconsistent invalid schema/table errors.
+* CORE-1162 Fix bug where `delete_files_before` caused tables to grow in size due to an open cursor issue.
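+
+A hedged sketch of the new `upsert` operation; the schema, table, and record attributes are illustrative assumptions:
+
+```json
+{
+  "operation": "upsert",
+  "schema": "dev",
+  "table": "dog",
+  "records": [
+    { "id": 8, "dog_name": "Harper", "weight_lbs": 55 }
+  ]
+}
+```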
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.3.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.3.1.md
new file mode 100644
index 00000000..51291a01
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/2.3.1.md
@@ -0,0 +1,12 @@
+---
+title: 2.3.1
+sidebar_position: 79698
+---
+
+### HarperDB 2.3.1, Penny Release
+1/29/2021
+
+**Bug Fixes**
+
+* CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered on that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`.
+* CORE-1219 Increased maximum table attributes from 1000 to 10000.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/_category_.json b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/_category_.json
new file mode 100644
index 00000000..285eecf7
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Penny (Version 2)",
+  "position": -2
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/index.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/index.md
new file mode 100644
index 00000000..89a91cb7
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v2-penny/index.md
@@ -0,0 +1,13 @@
+---
+title: HarperDB Penny (Version 2)
+---
+
+# HarperDB Penny (Version 2)
+
+Did you know our release names are dedicated to employee pups? For our second release, Penny was the star.
+
+Here is a bit about Penny:
+
+![picture of brindle dog](/img/v4.3/dogs/penny.webp)
+
+_Hi, I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds, along with afternoon snoozes in a sunny spot in my backyard._
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.0.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.0.0.md
new file mode 100644
index 00000000..2907ee6c
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.0.0.md
@@ -0,0 +1,31 @@
+---
+title: 3.0.0
+sidebar_position: 69999
+---
+
+### HarperDB 3.0, Monkey Release
+5/18/2021
+
+**Features/Updates**
+
+* CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation (see the sketch below).
+* CORE-1304 Upgrade to Node 12.22.1.
+* CORE-1235 Adds new upgrade/install functionality.
+* CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance.
+* CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`.
+* CORE-1243 Change `HTTPS_ON` setting value to false by default.
+* CORE-1189 Implement fastify web server, resulting in improved performance.
+* CORE-1221 Update user API to use role name instead of role id.
+* CORE-1225 Updated dependencies to eliminate npm security warnings.
+* CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality.
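+
+A hedged sketch of the new `search_by_conditions` operation; the schema/table, attributes, and values are illustrative assumptions:
+
+```json
+{
+  "operation": "search_by_conditions",
+  "schema": "dev",
+  "table": "dog",
+  "operator": "and",
+  "get_attributes": ["*"],
+  "conditions": [
+    { "search_attribute": "age", "search_type": "between", "search_value": [5, 8] },
+    { "search_attribute": "weight_lbs", "search_type": "greater_than", "search_value": 40 }
+  ]
+}
+```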
+
+**Bug Fixes**
+
+* CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB.
+* CORE-1288 Fix bug with drop table/schema that was causing an 'env required' error log.
+* CORE-1285 Update warning log when trying to create an attribute that already exists.
+* CORE-1254 Added logic to manage data collisions in clustering.
+* CORE-1212 Add pre-check to `drop_user` that returns an error if the user doesn't exist.
+* CORE-1114 Update response code and message from `add_user` when the user already exists.
+* CORE-1111 Update response from `create_attribute` to match the create schema/table response.
+* CORE-1205 Fixed bug that prevented a schema/table from being dropped if its name was a number or had a wildcard value in it. Updated validation for insert, upsert and update.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.0.md
new file mode 100644
index 00000000..148690f6
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.0.md
@@ -0,0 +1,23 @@
+---
+title: 3.1.0
+sidebar_position: 69899
+---
+
+### HarperDB 3.1.0, Monkey Release
+8/24/2021
+
+**Features/Updates**
+
+* CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions
+* CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions
+* CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management
+* CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variables or command line arguments
+* CORE-1310 Updated licensing functionality
+* CORE-1301 Updated validation for performance improvement
+* CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds
+* CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES`
+
+**Bug Fixes**
+
+* CORE-1315 Corrected issue in HarperDB restart scenario
+* CORE-1370 Update some of the validation error handlers so that they don't log the full stack
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.1.md
new file mode 100644
index 00000000..0adbeb21
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.1.md
@@ -0,0 +1,18 @@
+---
+title: 3.1.1
+sidebar_position: 69898
+---
+
+### HarperDB 3.1.1, Monkey Release
+9/23/2021
+
+**Features/Updates**
+
+* CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart
+* CORE-1395 Created a setting that allows the local Studio to be served from an instance of HarperDB
+* CORE-1397 Update the stock 404 response to not return the request URL
+* General updates to optimize the Docker container
+
+**Bug Fixes**
+
+* CORE-1399 Added fixes for complex SQL alias issues
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.2.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.2.md
new file mode 100644
index 00000000..f1c192b6
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.2.md
@@ -0,0 +1,15 @@
+---
+title: 3.1.2
+sidebar_position: 69897
+---
+
+### HarperDB 3.1.2, Monkey Release
+10/21/2021
+
+**Features/Updates**
+
+* Updated the installation ASCII art to reflect the new HarperDB logo
+
+**Bug Fixes**
+
+* CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number, causing tables to behave unexpectedly
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.3.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.3.md
new file mode 100644
index 00000000..2d484f8d
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.3.md
@@ -0,0 +1,11 @@
+---
+title: 3.1.3
+sidebar_position: 69896
+---
+
+### HarperDB 3.1.3, Monkey Release
+1/14/2022
+
+**Bug Fixes**
+
+* CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.4.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.4.md
new file mode 100644
index 00000000..ae0074fd
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.4.md
@@ -0,0 +1,11 @@
+---
+title: 3.1.4
+sidebar_position: 69895
+---
+
+### HarperDB 3.1.4, Monkey Release
+2/24/2022
+
+**Features/Updates**
+
+* CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.5.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.5.md
new file mode 100644
index 00000000..eff4b5b0
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.1.5.md
@@ -0,0 +1,11 @@
+---
+title: 3.1.5
+sidebar_position: 69894
+---
+
+### HarperDB 3.1.5, Monkey Release
+3/4/2022
+
+**Features/Updates**
+
+* CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which attempted to convert them to numbers but instead returned NaN.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.2.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.2.0.md
new file mode 100644
index 00000000..003575d8
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.2.0.md
@@ -0,0 +1,13 @@
+---
+title: 3.2.0
+sidebar_position: 69799
+---
+
+### HarperDB 3.2.0, Monkey Release
+3/25/2022
+
+**Features/Updates**
+
+* CORE-1391 Bug fix related to orphaned HarperDB background processes.
+* CORE-1509 Updated node version check, updated Node.js version, updated project dependencies.
+* CORE-1518 Remove final call from logger.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.2.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.2.1.md
new file mode 100644
index 00000000..dc511a70
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.2.1.md
@@ -0,0 +1,11 @@
+---
+title: 3.2.1
+sidebar_position: 69798
+---
+
+### HarperDB 3.2.1, Monkey Release
+6/1/2022
+
+**Features/Updates**
+
+* CORE-1573 Added logic to track the pid of the foreground process if running in the foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.3.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.3.0.md
new file mode 100644
index 00000000..3e3ca784
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/3.3.0.md
@@ -0,0 +1,12 @@
+---
+title: 3.3.0
+sidebar_position: 69699
+---
+
+### HarperDB 3.3.0 - Monkey
+
+* CORE-1595 Added new role type `structure_user`; this enables non-superusers to create/drop schema/table/attribute.
+* CORE-1501 Improved performance for `drop_table`.
+* CORE-1599 Added two new operations for custom functions: `install_node_modules` & `audit_node_modules`.
+* CORE-1598 Added `skip_node_modules` flag to the `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`.
+* CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, and Python are no longer required when installing on these platforms.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/_category_.json b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/_category_.json
new file mode 100644
index 00000000..0103ac36
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Monkey (Version 3)",
+  "position": -3
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/index.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/index.md
new file mode 100644
index 00000000..a5589f20
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v3-monkey/index.md
@@ -0,0 +1,11 @@
+---
+title: HarperDB Monkey (Version 3)
+---
+
+# HarperDB Monkey (Version 3)
+
+Did you know our release names are dedicated to employee pups? For our third release, we have Monkey.
+
+![picture of tan dog](/img/v4.3/dogs/monkey.webp)
+
+_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle Dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to my home. I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.0.md
new file mode 100644
index 00000000..49770307
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.0.md
@@ -0,0 +1,124 @@
+---
+title: 4.0.0
+sidebar_position: 59999
+---
+
+### HarperDB 4.0.0, Tucker Release
+11/2/2022
+
+**Networking & Data Replication (Clustering)**
+
+The HarperDB clustering internals have been rewritten, and the underlying technology for clustering has been completely replaced with [NATS](https://nats.io/), an enterprise-grade connective technology responsible for addressing, discovery, and exchanging of messages that drive the common patterns in distributed systems.
+* CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them.
+* CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` code as a dependency; on install of HarperDB, download `nats-server` if possible, else fall back to building from source code.
+* CORE-1593, CORE-1761: Add `nats.js` as a project dependency.
+* CORE-1466: Build NATS configs on `harperdb run` based on the HarperDB YAML configuration.
+* CORE-1467, CORE-1508: Launch and manage NATS servers with PM2.
+* CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions.
+* CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create the work queue stream, update the `hdb_nodes` table, create the clustering folder structure, and rebuild self-signed certs.
+* CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS.
+* CORE-1504: Update existing hooks to save transactions to work with NATS.
+* CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in the payload. These operations now dynamically manage sourcing of table-level transaction streams between nodes and work queues.
+* CORE-1522: Create `NATSReplyService` process which handles receiving NATS-based requests from remote instances and sending back appropriate responses.
+* CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation.
+* CORE-1611: Update pre-existing transaction log operations to be audit log operations.
+* CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams.
+* CORE-1668: Update NATS serialization/deserialization to use MessagePack.
+* CORE-1673: Add `system_info` param to the `hdb_nodes` table and update it on `add_node` and `cluster_status`.
+* CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & a clustering-only restart call the NATS server with a reload directive to maintain full uptime while servers refresh.
+* CORE-1474: HarperDB install adds the clustering folder structure.
+* CORE-1530: Post `drop_table`, HarperDB purges the related transaction stream.
+* CORE-1567: Set NATS config to always use TLS.
+* CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate.
+* CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`; this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB.
+* CORE-1535: Create the work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order.
+* CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`.
+* CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery.
+* CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not.
+* CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time from which to synchronize transactions.
+* CORE-1785: Optimizations and bug fixes in regard to sourcing data from remote instances of HarperDB.
+* CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together (see the sketch after this list).
+* CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh.
+* CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh.
+* CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to the new hub server port.
+* CORE-1609: Allow `remove_node` to be called when the other node cannot be reached.
+* CORE-1815: Add a transaction lock to `add_node` and `update_node` to avoid a concurrent NATS source update bug.
+* CORE-1848: Update stream configs if the node name has been changed in the YAML configuration.
+* CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on both the local and remote node respectively.
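+
+A hedged sketch of the new `set_cluster_routes` operation; the host and port values are illustrative assumptions:
+
+```json
+{
+  "operation": "set_cluster_routes",
+  "server": "hub",
+  "routes": [
+    { "host": "3.22.181.22", "port": 9932 }
+  ]
+}
+```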
+
+
+**Data Storage**
+
+We have made improvements to how we store, index, and retrieve data.
+* CORE-1619: Enabled new concurrent flushing technology for improved write performance.
+* CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions.
+* CORE-1652: Encode the values of secondary indices more efficiently for faster access.
+* CORE-1670: Store the updated timestamp in `lmdb.js`' version property.
+* CORE-1651: Enabled multiple value indexing of array values, which allows for the ability to search on specific elements in an array more efficiently.
+* CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance.
+* Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput.
+* CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values.
+* CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with `get_attributes`) without incurring the cost of decoding the entire object.
+* CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data.
+* CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates.
+* CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements.
+* CORE-1753: Removed old `node-lmdb` dependency.
+* CORE-1787: Freeze objects returned from queries.
+* CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync.
+
+**Logging**
+
+HarperDB has increased logging specificity by breaking out logs based on the component doing the logging. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more.
+* CORE-1497: Remove `pino` and `winston` dependencies.
+* CORE-1426: All logging is output via `stdout` and `stderr`; our default logging is then picked up by PM2, which handles writing out to file.
+* CORE-1431: Improved `read_log` operation validation.
+* CORE-1433, CORE-1463: Added log rotation.
+* CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration.
+* CORE-1436: Upgrade to 4.0 updates internals for logging changes.
+* CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests.
+* CORE-1691: Convert non-PM2 managed log file writes to use the Node.js `fs.appendFileSync` function.
+
+**Configuration**
+
+HarperDB has updated its configuration from a properties file to YAML.
+* CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML.
+* CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file.
+* Installer has updated prompts to reflect YAML settings.
+* CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`.
+* CORE-1461, CORE-1462, CORE-1483: Unit test improvements.
+* CORE-1492: Improvements to the `get_configuration` and `set_configuration` operations.
+* CORE-1503: Modify HarperDB configuration for more granular certificate definition.
+* CORE-1591: Update the `routes` IP param to `host` and move it to the `leaf` config in `harperdb.conf`.
+* CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on npm install.
+
+**Broad NodeJS and Platform Support**
+* CORE-1624: HarperDB can now run on multiple versions of NodeJS, from v14 to v19. We primarily test on v18, so that is the preferred version.
+
+**Windows 10 and 11**
+* CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL. Windows is only intended for evaluation and development purposes, not for production workloads.
+
+**Extra Changes and Bug Fixes**
+* CORE-1520: Refactor installer to remove all waterfall code and update to use Promises.
+* CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb.
+* CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned.
+* CORE-1626: Update the docker file to accommodate the new `harperdb.conf` file.
+* CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for CI/CD pipeline and integration tests.
+* CORE-1661: Fixed issue where an old boot properties file caused an error when attempting to install 4.0.0.
+* CORE-1697, CORE-1814, CORE-1855: Upgrade fastify dependency to new major version 4.
+* CORE-1629: Jobs are now running as processes managed by the PM2 daemon.
+* CORE-1733: Update LICENSE to reflect our EULA on our site.
+* CORE-1606: Enable Custom Functions by default.
+* CORE-1714: Include pre-built binaries for most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64).
+* CORE-1628: Fix issue where setting the license through an environment variable was not working.
+* CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements.
+* CORE-1706: Add support for encoding HTTP responses with MessagePack.
+* CORE-1709: Improve the way lmdb.js dependencies are installed.
+* CORE-1758: Remove/update unnecessary HTTP headers.
+* CORE-1756: On `npm install` and `harperdb install`, change the node version check from an error to a warning if the installed Node.js version does not match our preferred version.
+* CORE-1791: Optimizations to authenticated user caching.
+* CORE-1794: Update README to discuss Windows support & Node.js versions.
+* CORE-1837: Fix issue where the Custom Functions directory was not being created on install.
+* CORE-1742: Add more validation to the audit log - check that the schema/table exists and the log is enabled.
+* CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`.
+* CORE-1864: Fix to semver checks on upgrade.
+* CORE-1850: Fix issue where a `cluster_user` type role could not be altered.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.1.md
new file mode 100644
index 00000000..9e148e63
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.1.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.1
+sidebar_position: 59998
+---
+
+### HarperDB 4.0.1, Tucker Release
+01/20/2023
+
+**Bug Fixes**
+
+* CORE-1992 Local studio was not loading because the path got mangled in the build.
+* CORE-2001 Fixed `deploy_custom_function_project`, which broke after the node update.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.2.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.2.md
new file mode 100644
index 00000000..b65d1427
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.2.md
@@ -0,0 +1,12 @@
+---
+title: 4.0.2
+sidebar_position: 59997
+---
+
+### HarperDB 4.0.2, Tucker Release
+01/24/2023
+
+**Bug Fixes**
+
+* CORE-2003 Fix bug where, if the machine had one core, the thread config would default to zero.
+* Update to lmdb 2.7.3 and msgpackr 1.7.0
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.3.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.3.md
new file mode 100644
index 00000000..67aaae56
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.3.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.3
+sidebar_position: 59996
+---
+
+### HarperDB 4.0.3, Tucker Release
+01/26/2023
+
+**Bug Fixes**
+
+* CORE-2007 Add the update-nodes 4.0.0 launch script to the build script to fix the clustering upgrade.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.4.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.4.md
new file mode 100644
index 00000000..2a30c9d1
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.4.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.4
+sidebar_position: 59995
+---
+
+### HarperDB 4.0.4, Tucker Release
+01/27/2023
+
+**Bug Fixes**
+
+* CORE-2009 Fixed bug where `add_node` was not being called when upgrading clustering.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.5.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.5.md
new file mode 100644
index 00000000..dc66721f
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.5.md
@@ -0,0 +1,14 @@
+---
+title: 4.0.5
+sidebar_position: 59994
+---
+
+### HarperDB 4.0.5, Tucker Release
+02/15/2023
+
+**Bug Fixes**
+
+* CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to upgrade to determine whether new certificates should be created or existing certificates should be kept/used.
+* Fix the way NATS connections are honored in a local environment.
+* Do not define the certificate authority path to NATS if it is not defined in the HarperDB config.
+
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.6.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.6.md
new file mode 100644
index 00000000..bf97d148
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.6.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.6
+sidebar_position: 59993
+---
+
+### HarperDB 4.0.6, Tucker Release
+03/09/2023
+
+**Bug Fixes**
+
+* Fixed a data serialization error that occurs when a large number of different record structures are persisted in a single table.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.7.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.7.md
new file mode 100644
index 00000000..7d48666a
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.0.7.md
@@ -0,0 +1,11 @@
+---
+title: 4.0.7
+sidebar_position: 59992
+---
+
+### HarperDB 4.0.7, Tucker Release
+03/10/2023
+
+**Bug Fixes**
+
+* Update lmdb.js dependency
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.0.md
new file mode 100644
index 00000000..2b3805d2
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.0.md
@@ -0,0 +1,63 @@
+---
+title: 4.1.0
+sidebar_position: 59899
+---
+
+# 4.1.0
+
+HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation, with support for optimized load tracking and session affinity, better debuggability, and a reduced memory footprint.
+
+This means debugging will be much easier for custom functions. If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, set breakpoints in your custom functions, and debug them.
+
+The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread, which can improve caching locality, performance, and fairness. This can be enabled with the `http.sessionAffinity` option in your configuration.
+
+HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory-efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in a faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent) and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and can provide means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade; all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or `for...of` loops to handle query results).
+
+4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../deployments/configuration#storage) and [schemas configuration](../../../deployments/configuration#schemas) for information on how to configure these locations.
+
+Logging has been revamped and condensed into one `hdb.log` file. See the logging documentation for more information.
+
+A new operation called `cluster_network` was added; this operation will ping the cluster and return a list of enmeshed nodes.
+
+Custom Functions will no longer automatically load static file routes; instead, the `@fastify/static` plugin will need to be registered with the Custom Functions server. See [Host A Static Web UI](https://docs.harperdb.io/docs/v/4.1/custom-functions/host-static).
+
+Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object, it should be done in the `key` parameter. See examples [here](../../../developers/operations-api/bulk-operations#import-from-s3).
+
+Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3` (see the sketch below):
+
+* A new attribute `region` will need to be supplied.
+* The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`.
+
+Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard unix tooling for interacting with the process and is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`.
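+
+A hedged sketch of an `import_from_s3` request under the new AWS SDK v3 requirements, with `region` supplied and the nested object referenced via `key`; the bucket, key, credential placeholders, and table names are illustrative assumptions (see the bulk operations examples linked above for the authoritative shape):
+
+```json
+{
+  "operation": "import_from_s3",
+  "action": "insert",
+  "schema": "dev",
+  "table": "dog",
+  "s3": {
+    "aws_access_key_id": "YOUR_KEY_ID",
+    "aws_secret_access_key": "YOUR_SECRET_KEY",
+    "bucket": "hdb-import",
+    "key": "exports/dog.csv",
+    "region": "us-east-1"
+  }
+}
+```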
+
+Internal Tickets completed:
+
+* CORE-609 - Ensure that attribute names are always added to the global schema as Strings
+* CORE-1549 - Remove fastify-static code from the Custom Functions server which auto-serves content from the "static" folder
+* CORE-1655 - Iterator-based queries
+* CORE-1764 - Fix issue where the `describe_all` operation returns an empty object for non-super-users if schema(s) do not yet have table(s)
+* CORE-1854 - Switch to using worker threads instead of processes for handling concurrency
+* CORE-1877 - Extend the `csv_url_load` operation to allow for additional headers to be passed to the remote server when the csv is being downloaded
+* CORE-1893 - Add last updated timestamp to describe operations
+* CORE-1896 - Fix issue where `SELECT * FROM system.hdb_info` returns the wrong HDB version number after an instance upgrade
+* CORE-1904 - Fix issue when executing GEOJSON query in SQL
+* CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams
+* CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables
+* CORE-1655 - Streaming binary format serialization
+* CORE-1943 - Add configuration option to set the mount point for audit tables
+* CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams
+* CORE-1963 - Update logging for better readability, reduced duplication, and request context information
+* CORE-1968 - In server\nats\natsIngestService.js remove the `js_msg.working();` line to improve performance
+* CORE-1976 - Fix error when calling the `describe_table` operation with no schema or table defined in the payload
+* CORE-1983 - Fix issue where the `create_attribute` operation does not validate the request for required attributes
+* CORE-2015 - Remove PM2 logs that get logged in the console when starting HDB
+* CORE-2048 - systemd script for 4.1
+* CORE-2052 - Include thread information in `system_information` for visibility of threads
+* CORE-2061 - Add a better error msg when clustering is enabled without a cluster user set
+* CORE-2068 - Create new log rotate logic since pm2 log-rotate is no longer used
+* CORE-2072 - Update to Node 18.15.0
+* CORE-2090 - Upgrade testing from v4.0.x and v3.x to v4.1
+* CORE-2091 - Run the performance tests
+* CORE-2092 - Allow for automatic patch version updates of certain packages
+* CORE-2109 - Add verify option to clustering TLS configuration
+* CORE-2111 - Update AWS SDK to v3
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.1.md
new file mode 100644
index 00000000..537ef71c
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.1.md
@@ -0,0 +1,15 @@
+---
+title: 4.1.1
+sidebar_position: 59898
+---
+
+# 4.1.1
+
+06/16/2023
+
+* HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. On more memory-constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration).
+* An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration), which can provide improved replication performance in a fully connected network.
+* Improvements to our OpenShift container.
+* Dependency security updates.
+
+**Bug Fixes**
+
+* Fixed a bug in reporting database metrics in the `system_information` operation.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.2.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.2.md
new file mode 100644
index 00000000..2a62db64
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.1.2.md
@@ -0,0 +1,13 @@
+---
+title: 4.1.2
+sidebar_position: 59897
+---
+
+### HarperDB 4.1.2, Tucker Release
+06/16/2023
+
+* HarperDB has updated binary dependencies to support older glibc versions back to 2.17.
+* A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`.
+* Improvements to our OpenShift container.
+* Dependency security updates.
+
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.0.md
new file mode 100644
index 00000000..a57a9781
--- /dev/null
+++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.0.md
@@ -0,0 +1,99 @@
+---
+title: 4.2.0
+sidebar_position: 59799
+---
+
+# 4.2.0
+
+#### HarperDB 4.2.0
+
+HarperDB 4.2 introduces a new interface for accessing our core database engine, with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 also adopts a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities, including a new REST interface, advanced caching, and real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events.
+
+4.2 also introduces configurable database schemas, using GraphQL schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting started guide](../../../getting-started) to see how easy it is to get started with HarperDB apps.
+
+### Resource API
+
+The [Resource API](../../reference/resource) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB databases/tables and is designed to be easily implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real-time. The [Resource API documentation details this interface](../../reference/resource).
+
+### Component Architecture
+
+HarperDB's custom functions have evolved towards a [full component architecture](../../../developers/components); our internal functionality is defined as components, and this can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a [well-defined interface for creating your own components](../../../developers/components/writing-extensions). Components can easily be deployed/installed into HarperDB using [NPM and Github references as well](../../../developers/components/installing).
+
+### Configurable Database Schemas
+
+HarperDB applications or components support [schema definitions using GraphQL schema syntax](../../../../developers/applications/defining-schemas). This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with application code.
+
+### REST Interface
+
+HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs, using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs, and more. With standards-based header support built in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers.
+
+### Real-Time
+
+HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real-time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database-style data updates and querying along with message delivery. [Real-time messaging](../../../../developers/real-time) of data is available through several protocols:
+
+#### MQTT
+
+4.2 now includes MQTT support, a publish and subscribe messaging protocol designed to be efficient enough for even small Internet of Things devices. This allows clients to connect to HarperDB and publish messages through our data center and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions.
+
+#### WebSockets
+
+HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling.
+
+#### Server-Sent Events
+
+HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows web sites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP.
+
+### Database Structure
+
+HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally-consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are also maintained in the same database with atomic consistency.
+
+Databases are now entirely encapsulated in a file, which means they can be moved/copied to another server without requiring any separate metadata updates in the system tables.
+
+### Clone Node
+
+HarperDB includes new functionality for adding new HarperDB nodes to a cluster. New instances can be configured to clone from a leader node, copying a database snapshot from that node and self-configuring from it as well, to facilitate accelerated deployment of new nodes for fast horizontal scaling to meet demand. [See the documentation on Clone Node for more information.](../../../../administration/cloning)
+
+### Operations API terminology updates
+
+Any operation that used the `schema` property was updated to make this property optional and alternately support `database` as the property for specifying the database (formerly "schema"). If both `schema` and `database` are absent, the operation defaults to using the `data` database. The term "primary key" is now used in place of "hash". The NoSQL operation `search_by_hash` was updated to `search_by_id` (see the sketch before the Dev Mode section below).
+
+Support was added for defining a table with `primary_key` instead of `hash_attribute`.
+
+## Configuration
+
+There have been significant changes to `harperdb-config.yaml`; however, none of these changes should affect pre-4.2 versions. If you upgrade to 4.2, any existing configuration should be backwards compatible and will not need to be updated.
+
+`harperdb-config.yaml` has had some configuration values added, removed, renamed, and defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration) for the most current configuration parameters.
+
+* The `http` element has been expanded.
+  * `compressionThreshold` was added.
+  * All `customFunction` configuration now lives here, except for the `tls` section.
+* `threads` has moved out of the `http` element and now is its own top-level element.
+* The `authentication` section was moved out of the `operationsApi` section and is now its own top-level element/section.
+* `analytics.aggregatePeriod` was added.
+* Default logging level was changed to `warn`.
+* Default clustering log level was changed to `info`.
+* `clustering.republishMessages` now defaults to `false`.
+* `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`.
+* Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section.
+* Added a `securePort` parameter to `operationsApi` and `http` used for setting the https port.
+* Added a new top-level `tls` section.
+* Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`.
+* Added an element called `componentRoot` which replaces `customFunctions.root`.
+* Updated custom pathing to use `databases` instead of `schemas`.
+* Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events.
+* A new `mqtt` section was added.
+
+### Socket Management
+
+HarperDB now uses socket sharing to distribute incoming connections to different threads (`SO_REUSEPORT`). This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity-based socket delegation.
+
+HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single-port configuration.
+
+### Sessions
+
+HarperDB now supports cookie-based sessions for authentication for web clients. This can be used with the standard authentication mechanisms to log in, and then cookies can be used to preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials.
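+
+A hedged sketch of the renamed NoSQL operation using the new terminology described above (`database` instead of `schema`, `search_by_id` instead of `search_by_hash`); the database/table names, ids, and attributes are illustrative assumptions:
+
+```json
+{
+  "operation": "search_by_id",
+  "database": "dev",
+  "table": "dog",
+  "ids": [1, 2],
+  "get_attributes": ["dog_name", "breed"]
+}
+```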
+ +### Dev Mode + +HarperDB can now directly run a HarperDB application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting on any changes to your application files. Dev mode is recommended for local application and component development. diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.1.md new file mode 100644 index 00000000..38617ca9 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.1.md @@ -0,0 +1,13 @@ +--- +title: 4.2.1 +sidebar_position: 59798 +--- + +### HarperDB 4.2.1, Tucker Release +11/3/2023 + +* Downgrade NATS 2.10.3 back to 2.10.1 due to a regression in connection handling. +* Handle package names with underscores. +* Improved validation of queries and comparators. +* Avoid double replication on transactions with multiple commits. +* Added file metadata on `get_component_file`. diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.2.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.2.md new file mode 100644 index 00000000..15768374 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.2.md @@ -0,0 +1,15 @@ +--- +title: 4.2.2 +sidebar_position: 59797 +--- + +### HarperDB 4.2.2, Tucker Release +11/8/2023 + +* Increase timeouts for NATS connections. +* Fix for database snapshots for backups (and for clone node). +* Fix application of permissions for default tables exposed through REST. +* Log replication failures with record information. +* Fix application of authorization/permissions for MQTT commands. +* Fix copying of local components in clone node. +* Fix calculation of overlapping start time in clone node.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.3.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.3.md new file mode 100644 index 00000000..dab25c3d --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.3.md @@ -0,0 +1,13 @@ +--- +title: 4.2.3 +sidebar_position: 59796 +--- + +### HarperDB 4.2.3, Tucker Release +11/15/2023 + +* When setting `securePort`, disable the insecure port setting on the same port +* Fix `harperdb status` when the pid file is missing +* Fix/include missing icons/fonts from local studio +* Fix crash that can occur when concurrently accessing records > 16KB +* Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.4.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.4.md new file mode 100644 index 00000000..87ee241d --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.4.md @@ -0,0 +1,10 @@ +--- +title: 4.2.4 +sidebar_position: 59795 +--- + +### HarperDB 4.2.4, Tucker Release +11/16/2023 + +* Prevent coercion of strings to numbers in SQL queries (in the WHERE clause) +* Address fastify deprecation warning about accessing config \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.5.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.5.md new file mode 100644 index 00000000..1172c4b3 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.5.md @@ -0,0 +1,12 @@ +--- +title: 4.2.5 +sidebar_position: 59794 +--- + +### HarperDB 4.2.5, Tucker Release +11/22/2023 + +* Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery) +* Update the `geoNear` function to tolerate null values +* lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved +* Add header to indicate the start of a new authenticated session (for studio to identify authenticated sessions) diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.6.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.6.md new file mode 100644 index 00000000..d0a1f177 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.6.md @@ -0,0 +1,10 @@ +--- +title: 4.2.6 +sidebar_position: 59793 +--- + +### HarperDB 4.2.6, Tucker Release +11/29/2023 + +* Update various geo SQL functions to tolerate invalid values +* Properly report component installation/load errors in `get_components` (for studio to load components after an installation failure) \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.7.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.7.md new file mode 100644 index 00000000..78bfcaa7 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.7.md @@ -0,0 +1,11 @@ +--- +title: 4.2.7 +sidebar_position: 59792 +--- + +### HarperDB 4.2.7 +12/6/2023 + +* Add support for cloning over the top of an existing HarperDB instance +* Add health checks for the NATS consumer with the ability to restart consumer loops for better resiliency +* Revert
Fastify autoload module due to a regression that had caused ECMAScript modules for Fastify route modules to fail to load on Windows \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.8.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.8.md new file mode 100644 index 00000000..fbe94b69 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.2.8.md @@ -0,0 +1,14 @@ +--- +title: 4.2.8 +sidebar_position: 59791 +--- + +### HarperDB 4.2.8 +12/19/2023 + +* Added support for CLI command-line arguments for clone node +* Added support for cloning a node without enabling clustering +* Clear NATS client cache on closed event +* Fix check for attribute permissions so that an empty attribute permissions array is treated as a table-level permission definition +* Improve speed of cross-node health checks +* Fix for using `database` in describe operations diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.0.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.0.md new file mode 100644 index 00000000..ebbffbee --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.0.md @@ -0,0 +1,110 @@ +--- +title: 4.3.0 +sidebar_position: 59699 +--- + +### HarperDB 4.3.0, Tucker Release +3/19/2024 + +### Relationships and Joins + +HarperDB now supports defining relationships between tables. These relationships can be defined as one-to-many, many-to-one, or many-to-many, and use a foreign key to record the relationship between records from different tables. An example of how to use this to define many-to-one and one-to-many relationships between a product table and a brand table: +```graphql +type Product @table { + id: ID @primaryKey + name: String @indexed + # foreign key used to reference a brand + brandId: ID @indexed + # many-to-one relationship to brand + brand: Brand @relation(from: "brandId") +} +type Brand @table { + id: ID @primaryKey + name: String @indexed + # one-to-many relationship of brand to products of that brand + products: [Product] @relation(to: "brandId") +} +``` +This relationship model can be used in queries and selects, which will automatically "join" the data from the tables. For example, you could search for products by brand name: +```http +/Product?brand.name=Microsoft +``` + +HarperDB also now supports querying with a sort order. Multiple sort orders can be provided to break ties. Nested selects have also been added, which also utilize joins when related records are referenced. For example: +```http +/Product?brand.name=Microsoft&sort(price)&select(name,brand{name,size}) +``` + +See the [schema definition documentation](../../../../developers/applications/defining-schemas) for more information on defining relationships, and the [REST documentation for more information on queries](../../../../developers/rest). + +### OpenAPI Specification +A new default endpoint `GET /openapi` was added for describing endpoints configured through a GraphQL schema. + +### Query Optimizations +HarperDB has also made numerous improvements to query planning and execution for high-performance query results with a broader range of queries. + +### Indexing Nulls +New tables and indexes now support indexing null values, enabling queries by null (as well as queries for non-null values).
For example, you can query by nulls with the REST interface: +```http +GET /Table/?attribute=null +``` +Note that existing indexes will remain without null value indexing, and can only support indexing/querying by nulls if they are rebuilt (removed and re-added). + +### CLI Expansion + +The HarperDB CLI now supports an expansive set of commands that execute operations from the operations API. For example, you can list users from the command line: +```bash +harperdb list_users +``` + +### BigInt Support + +HarperDB now supports `BigInt` attributes/values with integers (with full precision) up to 1000 bits (or about 10^301). These can be used as primary keys or standard attributes, and can be used in queries or other operations. Within JSON documents, you can simply use standard JSON integer numbers with up to 300 digits, and large BigInt integers will be returned as standard JSON numbers. + +### Local Studio Upgrade + +HarperDB has upgraded the local studio to match the same version that is offered on https://studio.harperdb.io. The local studio now has the full robust feature set of the online version. + +## MQTT + +### mTLS Support + +HarperDB now supports mTLS-based authentication for HTTP, WebSockets, and MQTT. See the [configuration documentation for more information](../../../deployments/configuration). + +### Single-Level Wildcards + +HarperDB's MQTT service now supports single-level wildcards (`+`), which facilitates a greater range of subscriptions. + +### Retain Handling + +HarperDB's MQTT service now supports the retain-handling flags for subscriptions that are made using MQTT v5. + +### CRDT + +HarperDB now supports basic conflict-free replicated data type (CRDT) updates that allow properties to be individually updated and merged when separate properties are updated on different threads or nodes. Individual property CRDT updates are automatically performed when you update individual properties through the Resource API, and are used when making `PATCH` requests through the REST API. + +The CRDT functionality also supports explicit incrementation to merge multiple parallel incrementation requests with proper summing. See the [Resource API for more information](../../../technical-details/reference/resource). + +### Configuration Improvements + +The configuration has improved support for detecting port conflicts and handling paths for fastify routes, and now includes support for specifying a heap limit and TLS ciphers. See the [configuration documentation for more information](../../../deployments/configuration). + +### Balanced Audit Log Cleanup + +Audit log cleanup has been improved to reduce resource consumption during scheduled cleanups. + +### `export_*` support for `search_by_conditions` + +The `export_local` and `export_to_s3` operations now support `search_by_conditions` as one of the allowed search operators. + +## Storage Performance Improvements + +Significant improvements were made to the handling of free space to decrease free-space fragmentation and improve the performance of reusing free space for new data. This includes prioritizing reuse of recently released free space for better memory/caching utilization. + +### Compact Database + +In addition to storage improvements, HarperDB now includes functionality for [compacting a database](../../../deployments/harperdb-cli) (while offline), which can be used to eliminate all free space and reset any fragmentation. + +### Compression + +Compression is now enabled by default for all records over 4KB.
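+ +Following on the compaction functionality above, a hedged sketch of an offline compaction (assuming the `copy-db` CLI command documented in the CLI docs, the default `data` database, and hypothetical paths; HarperDB should be stopped first): + +```bash +# Copy the database with compaction while HarperDB is stopped +harperdb copy-db data /home/user/hdb/database/compacted.mdb +# Then move the compacted copy into place of the original database file +```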
\ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.1.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.1.md new file mode 100644 index 00000000..e583d175 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.1.md @@ -0,0 +1,11 @@ +--- +title: 4.3.1 +sidebar_position: 59698 +--- + +### HarperDB 4.3.1 +3/25/2024 + +* Fix Fastify warning about `responseTime` usage +* Add access to the MQTT topic in the context +* Fix for ensuring local NATS streams are created diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.10.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.10.md new file mode 100644 index 00000000..bd286e90 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.10.md @@ -0,0 +1,12 @@ +--- +title: 4.3.10 +sidebar_position: 59689 +--- + +### HarperDB 4.3.10 +5/5/2024 + +* Provide a `data` property on the request/context with deserialized data from the request body for any request, including methods that don't typically have a request body +* Ensure that CRDTs are not double-applied after committing a transaction +* Delete the MQTT will after publishing, even if it fails to publish +* Improve transaction retry logic to use async non-optimistic transactions after multiple retries \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.11.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.11.md new file mode 100644 index 00000000..df2cc2fb --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.11.md @@ -0,0 +1,10 @@ +--- +title: 4.3.11 +sidebar_position: 59688 +--- + +### HarperDB 4.3.11 +5/15/2024 + +* Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS +* Fix warning in Node v22 \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.12.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.12.md new file mode 100644 index 00000000..c4344da9 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.12.md @@ -0,0 +1,10 @@ +--- +title: 4.3.12 +sidebar_position: 59687 +--- + +### HarperDB 4.3.12 +5/16/2024 + +* Fix for handling ciphers in multiple certificates +* Allow each certificate config to have multiple hostnames \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.13.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.13.md new file mode 100644 index 00000000..7152f231 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.13.md @@ -0,0 +1,11 @@ +--- +title: 4.3.13 +sidebar_position: 59686 +--- + +### HarperDB 4.3.13 +5/22/2024 + +* Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available +* Fix for a memory leak when a node is down and consumers are trying to reconnect +* Faster cross-thread notification mechanism for transaction events \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.14.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.14.md new file mode
100644 index 00000000..8374b138 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.14.md @@ -0,0 +1,9 @@ +--- +title: 4.3.14 +sidebar_position: 59685 +--- + +### HarperDB 4.3.14 +5/24/2024 + +* Fix application of ciphers to multi-certificate TLS configuration \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.15.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.15.md new file mode 100644 index 00000000..5bbb2304 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.15.md @@ -0,0 +1,10 @@ +--- +title: 4.3.15 +sidebar_position: 59684 +--- + +### HarperDB 4.3.15 +5/29/2024 + +* Add support for wildcards in hostnames for SNI +* Properly apply cipher settings on multiple TLS configurations \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.16.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.16.md new file mode 100644 index 00000000..b3b198d8 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.16.md @@ -0,0 +1,10 @@ +--- +title: 4.3.16 +sidebar_position: 59683 +--- + +### HarperDB 4.3.16 +6/3/2024 + +* Properly shim legacy TLS configuration with new multi-certificate support +* Show the changed filenames when an application is reloaded \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.17.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.17.md new file mode 100644 index 00000000..6cebb30b --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.17.md @@ -0,0 +1,14 @@ +--- +title: 4.3.17 +sidebar_position: 59682 +--- + +### HarperDB 4.3.17 +6/13/2024 + +* Add MQTT analytics of incoming messages, separated by QoS level +* Ensure that any installed `harperdb` package in components is relinked to the running harperdb. +* Upgrade storage to more efficiently avoid storage increases +* Fix to improve database metrics in `system_information` +* Fix for pathing on Windows with extension modules +* Add ability to define a range of listening threads \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.18.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.18.md new file mode 100644 index 00000000..7de1ca2d --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.18.md @@ -0,0 +1,9 @@ +--- +title: 4.3.18 +sidebar_position: 59681 +--- + +### HarperDB 4.3.18 +6/18/2024 + +* Immediately terminate an MQTT connection when there is a keep-alive timeout. \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.19.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.19.md new file mode 100644 index 00000000..ed2782da --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.19.md @@ -0,0 +1,11 @@ +--- +title: 4.3.19 +sidebar_position: 59680 +--- + +### HarperDB 4.3.19 +7/2/2024 + +* Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized.
+* Ensure that deploying components empties the target directory for a clean installation and expansion of a `package` sub-directory. +* Ensure that we do not double-load components that are referenced by symlink from node_modules and in the components directory. \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.2.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.2.md new file mode 100644 index 00000000..7a967e98 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.2.md @@ -0,0 +1,15 @@ +--- +title: 4.3.2 +sidebar_position: 59697 +--- + +### HarperDB 4.3.2 +3/29/2024 + +* Clone node updates to individually clone missing parts +* Fixes for publishing OpenShift container +* Increase purge stream timeout +* Fixed declaration of analytics schema so queries work before a restart +* Fix for iterating queries when deleted records exist +* LMDB stability upgrade +* Fix for cleanup of last will in MQTT \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.20.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.20.md new file mode 100644 index 00000000..68a18912 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.20.md @@ -0,0 +1,17 @@ +--- +title: 4.3.20 +sidebar_position: 59679 +--- + +### HarperDB 4.3.20 +7/11/2024 + +* The `restart_service` operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) +* Disable Nagle's algorithm for TCP connections to improve performance +* Append Server-Timing header if a fastify route has already added one +* Avoid symlinking the harperdb directory to itself +* Fix for deleting an empty database +* Upgrade ws and pm2 packages for security vulnerabilities +* Improved TypeScript definitions for Resource and Context. +* The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from the source +* Better error reporting of MQTT parsing errors and termination of connections for compliance diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.21.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.21.md new file mode 100644 index 00000000..b8c22de5 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.21.md @@ -0,0 +1,13 @@ +--- +title: 4.3.21 +sidebar_position: 59678 +--- + +### HarperDB 4.3.21 +8/21/2024 + +* Fixed an issue with iterating/serializing query results with a `limit`. +* Fixed an issue that was preventing the caching of structured records in memory. +* Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context`. +* Fixed logging warnings about license limits after a license is updated. +* Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.22.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.22.md new file mode 100644 index 00000000..92f1da33 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.22.md @@ -0,0 +1,14 @@ +--- +title: 4.3.22 +sidebar_position: 59677 +--- + +### HarperDB 4.3.22 +9/6/2024 + +* Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions +* Allow a `.extension` in URL paths to indicate both preferred encoding and decoding +* Added support for multi-part IDs in query parameters +* Limit describe calls by time before using statistical sampling +* Proper cleanup of a transaction when it is aborted due to running out of available read transactions +* Updates to release/builds \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.23.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.23.md new file mode 100644 index 00000000..8dd47c25 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.23.md @@ -0,0 +1,11 @@ +--- +title: 4.3.23 +sidebar_position: 59676 +--- + +### HarperDB 4.3.23 +9/12/2024 + +* Avoid long-running read transactions on subscription catch-ups +* Reverted change to setting default certificate for IP address only +* Better handling of last-will messages on startup \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.24.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.24.md new file mode 100644 index 00000000..ef4933ea --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.24.md @@ -0,0 +1,9 @@ +--- +title: 4.3.24 +sidebar_position: 59675 +--- + +### HarperDB 4.3.24 +9/12/2024 + +* Fix for querying for large strings (over 255 characters) \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.25.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.25.md new file mode 100644 index 00000000..387a2588 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.25.md @@ -0,0 +1,12 @@ +--- +title: 4.3.25 +sidebar_position: 59674 +--- + +### HarperDB 4.3.25 +9/24/2024 + +* Add analytics for replication latency +* Fix iteration issue over asynchronous joined queries +* Local studio fix for loading applications in an insecure context (HTTP) +* Local studio fix for loading the configuration tab \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.26.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.26.md new file mode 100644 index 00000000..d910120c --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.26.md @@ -0,0 +1,10 @@ +--- +title: 4.3.26 +sidebar_position: 59673 +--- + +### HarperDB 4.3.26 +9/27/2024 + +* Fixed a security issue that allowed users to bypass access controls with the operations API +* Previously expiration handling was limited to tables with a source, but now it can be applied to any table \ No newline at end of file diff --git
a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.27.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.27.md new file mode 100644 index 00000000..ca8352d3 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.27.md @@ -0,0 +1,13 @@ +--- +title: 4.3.27 +sidebar_position: 59672 +--- + +### HarperDB 4.3.27 +10/2/2024 + +* Fixed handling of an HTTP upgrade with a Connection header that does not use Upgrade as the sole value (for Firefox) +* Added metrics for requests by status code +* Properly remove attributes from the stored metadata when removed from the GraphQL schema +* Fixed a regression in clustering retrieval of schema description +* Fix attribute validation/handling to ensure that sequential IDs can be assigned with insert/upsert operations \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.28.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.28.md new file mode 100644 index 00000000..fdba3828 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.28.md @@ -0,0 +1,11 @@ +--- +title: 4.3.28 +sidebar_position: 59671 +--- + +### HarperDB 4.3.28 +10/3/2024 + +* Tolerate a user with no role when building the NATS config +* Change metrics for requests by status code to be prefixed with "response_" +* Log error `cause`, and other properties, when available. diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.29.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.29.md new file mode 100644 index 00000000..c1f533fd --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.29.md @@ -0,0 +1,16 @@ +--- +title: 4.3.29 +sidebar_position: 59670 +--- + +### HarperDB 4.3.29 +10/7/2024 + +* Avoid unnecessary cookie session creation without explicit login +* Added support for caching directives in the operations API +* Fixed issue with creating metadata for a table with no primary key +* Local studio upgrade: + * Added support for "cache only" mode to view table data without origin resolution + * Added partial support for cookie-based authentication + * Added support for browsing tables with no primary key + * Improved performance for sorting tables diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.3.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.3.md new file mode 100644 index 00000000..52d7ebde --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.3.md @@ -0,0 +1,9 @@ +--- +title: 4.3.3 +sidebar_position: 59696 +--- + +### HarperDB 4.3.3 +4/1/2024 + +* Improve MQTT logging by properly logging auth failures and disconnections diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.30.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.30.md new file mode 100644 index 00000000..70c10852 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.30.md @@ -0,0 +1,9 @@ +--- +title: 4.3.30 +sidebar_position: 59669 +--- + +### HarperDB 4.3.30 +10/9/2024 + +* Properly assign transaction timestamp to writes from cache resolutions (ensuring that latencies can be calculated on replicating nodes) diff --git
a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.31.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.31.md new file mode 100644 index 00000000..097726ac --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.31.md @@ -0,0 +1,11 @@ +--- +title: 4.3.31 +sidebar_position: 59668 +--- + +### HarperDB 4.3.31 +10/10/2024 + +* Reset the restart limit for manual restarts to ensure that the NATS process will continue to restart after more than 10 manual restarts +* Only apply caching directives (from headers) to tables/resources that are configured to be caching (sourced from another resource) +* Catch/tolerate errors on serializing objects for logging diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.32.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.32.md new file mode 100644 index 00000000..ee5da648 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.32.md @@ -0,0 +1,11 @@ +--- +title: 4.3.32 +sidebar_position: 59667 +--- + +### HarperDB 4.3.32 +10/16/2024 + +* Fix a memory leak when cluster_network closes a hub connection +* Improved MQTT error handling, with less verbose logging of more common errors, and treat a missing subscription as an invalid/missing topic +* Record analytics and the server-timing header even when cache resolution fails diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.33.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.33.md new file mode 100644 index 00000000..271373ef --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.33.md @@ -0,0 +1,9 @@ +--- +title: 4.3.33 +sidebar_position: 59666 +--- + +### HarperDB 4.3.33 +10/24/2024 + +* Change the default maximum length for a fastify route parameter from 100 to 1000 characters.
diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.34.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.34.md new file mode 100644 index 00000000..1071c273 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.34.md @@ -0,0 +1,9 @@ +--- +title: 4.3.34 +sidebar_position: 59665 +--- + +### HarperDB 4.3.34 +10/24/2024 + +* lmdb-js upgrade diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.35.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.35.md new file mode 100644 index 00000000..1811732b --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.35.md @@ -0,0 +1,10 @@ +--- +title: 4.3.35 +sidebar_position: 59664 +--- + +### HarperDB 4.3.35 +11/12/2024 + +* Upgrades for supporting Node.js v23 +* Fix for handling a change in the schema for nested data structures diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.36.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.36.md new file mode 100644 index 00000000..9200cd62 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.36.md @@ -0,0 +1,9 @@ +--- +title: 4.3.36 +sidebar_position: 59663 +--- + +### HarperDB 4.3.36 +11/14/2024 + +* lmdb-js upgrade for better free-space management diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.4.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.4.md new file mode 100644 index 00000000..f50f1bb6 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.4.md @@ -0,0 +1,10 @@ +--- +title: 4.3.4 +sidebar_position: 59695 +--- + +### HarperDB 4.3.4 +4/9/2024 + +* Fixed a buffer overrun issue with decompressing compressed data +* Better keep-alive of transactions with long-running queries \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.5.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.5.md new file mode 100644 index 00000000..40d030e5 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.5.md @@ -0,0 +1,9 @@ +--- +title: 4.3.5 +sidebar_position: 59694 +--- + +### HarperDB 4.3.5 +4/10/2024 + +* Fixed a buffer overrun issue with decompressing compressed data \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.6.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.6.md new file mode 100644 index 00000000..92b28286 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.6.md @@ -0,0 +1,13 @@ +--- +title: 4.3.6 +sidebar_position: 59693 +--- + +### HarperDB 4.3.6 +4/12/2024 + +* Fixed parsing of dates from epoch millisecond times in queries +* Fixed CRDT incrementation of different data types +* Adjustments to text/plain content type q-value handling +* Fixed parsing of passwords with a colon +* Added MQTT events for connections, authorization, and disconnections \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.7.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.7.md new file mode
100644 index 00000000..8f45995a --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.7.md @@ -0,0 +1,13 @@ +--- +title: 4.3.7 +sidebar_position: 59692 +--- + +### HarperDB 4.3.7 +4/16/2024 + +* Fixed transaction handling to stay open during long compaction operations +* Fixed handling of sorting on non-indexed attributes +* Storage stability improvements +* Fixed authentication/authorization of WebSocket connections and use of cookies +* Fixes for clone node operations \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.8.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.8.md new file mode 100644 index 00000000..cd0fe88e --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.8.md @@ -0,0 +1,13 @@ +--- +title: 4.3.8 +sidebar_position: 59691 +--- + +### HarperDB 4.3.8 +4/26/2024 + +* Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within the keep-alive window) +* Improved handling of write queue timeouts, with configurability +* Fixed a memory leak that can occur with NATS reconnections after heartbeat misses +* Fixed a bug in clone node with a null port +* Add error events to the MQTT events system \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.9.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.9.md new file mode 100644 index 00000000..dca6a92f --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/4.3.9.md @@ -0,0 +1,9 @@ +--- +title: 4.3.9 +sidebar_position: 59690 +--- + +### HarperDB 4.3.9 +4/30/2024 + +* lmdb-js upgrade \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/_category_.json b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/_category_.json new file mode 100644 index 00000000..9a7bca50 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Tucker (Version 4)", + "position": -4 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/index.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/index.md new file mode 100644 index 00000000..ba5cb8a7 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/index.md @@ -0,0 +1,29 @@ +--- +title: HarperDB Tucker (Version 4) +--- + +# HarperDB Tucker (Version 4) + +HarperDB version 4 ([Tucker release](./tucker)) represents a major step forward in database technology.
This release line has ground-breaking architectural advancements, including: + +## [4.0](./4.0.0) +* New clustering technology that delivers robust, resilient, and high-performance replication +* Major storage improvements with a highly efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities + +## [4.1](./4.1.0) +* New streaming iterators mechanism that allows query results to be delivered to clients _while_ query results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery +* New thread-based concurrency model for more efficient resource usage + +## [4.2](./4.2.0) +* New component architecture and Resource API for advanced, robust custom database application development +* Real-time capabilities through MQTT, WebSockets, and Server-Sent Events +* REST interface for intuitive, fast, and standards-compliant HTTP interaction +* Native caching capabilities for high-performance cache scenarios +* Clone node functionality + +## [4.3](./4.3.0) +* Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting, with significant query optimizations +* More advanced transaction support for CRDTs and storage of large integers (with BigInt) +* Better management with the new upgraded local studio and new CLI features + +Did you know our release names are dedicated to employee pups? For our fourth release, [meet Tucker!](./tucker) diff --git a/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/tucker.md b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/tucker.md new file mode 100644 index 00000000..e8dc0a36 --- /dev/null +++ b/site/versioned_docs/version-4.3/technical-details/release-notes/v4-tucker/tucker.md @@ -0,0 +1,11 @@ +--- +title: HarperDB Tucker (Version 4) +--- + +# HarperDB Tucker (Version 4) + +Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. + +![picture of grey and white dog](/img/v4.3/dogs/tucker.png) + +_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at HarperDB. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard. I have very ticklish feet and love belly rubs!_ diff --git a/site/versioned_docs/version-4.4/administration/_category_.json b/site/versioned_docs/version-4.4/administration/_category_.json new file mode 100644 index 00000000..828e0998 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Administration", + "position": 2, + "link": { + "type": "generated-index", + "title": "Administration Documentation", + "description": "Guides for managing and administering HarperDB instances", + "keywords": [ + "administration" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/administration.md b/site/versioned_docs/version-4.4/administration/administration.md new file mode 100644 index 00000000..4577a9d9 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/administration.md @@ -0,0 +1,27 @@ +--- +title: Best Practices and Recommendations +--- + +# Best Practices and Recommendations + +Harper is designed for minimal administrative effort, and with managed services these tasks are handled for you. But there are important things to consider when managing your own Harper servers.
+ +### Data Protection, Backup, and Recovery + +In a distributed database, data protection and recovery can benefit from different strategies than in a traditional single-server database. Multiple aspects of data protection and recovery should be considered: + +* Availability: As a distributed database, Harper is intrinsically built for high availability, and a cluster will continue to run even when entire servers fail. This is the first and primary defense for protecting against any downtime or data loss. Harper provides fast horizontal scaling functionality with node cloning, which makes it easy to establish high-availability clusters. +* [Audit log](./logging/audit-logging): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides a security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. +* Snapshots: When Harper is used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) operation, which provides direct support for making and retrieving database snapshots; an HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (customization is often preferred in certain recovery situations to minimize data loss). + +### Horizontal Scaling with Node Cloning + +Harper provides rapid horizontal scaling capabilities through [node cloning functionality described here](./cloning). + +### Monitoring + +Harper provides robust capabilities for analytics and observability to facilitate effective and informative monitoring: +* Analytics provides statistics on usage, request counts, load, and memory usage, with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics). +* A large variety of real-time statistics about load, system information, database metrics, and thread usage can be retrieved through the [`system_information` API](../developers/operations-api/utilities). +* Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering). +* Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy to visualize and monitor Harper with Grafana. diff --git a/site/versioned_docs/version-4.4/administration/cloning.md b/site/versioned_docs/version-4.4/administration/cloning.md new file mode 100644 index 00000000..1550814f --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/cloning.md @@ -0,0 +1,150 @@ +--- +title: Clone Node +--- + +# Clone Node + +Clone node is a configurable node script that, when pointed at another instance of Harper, will create a clone of that +instance's config and databases and set up full replication. If it is run in a location where there is no existing Harper install, +it will, along with cloning, install Harper.
If it is run in a location where there is another Harper instance, it will +only clone config, databases and replication that do not already exist. + +Clone node is triggered when Harper is installed or started with certain environment or command line (CLI) variables set (see below). + +**Leader node** - the instance of Harper you are cloning.\ +**Clone node** - the new node which will be a clone of the leader node. + +To start a clone, run `harperdb` in the CLI with either of the following sets of variables: + +#### Environment variables + +* `HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +* `HDB_LEADER_USERNAME` - The leader node admin username. +* `HDB_LEADER_PASSWORD` - The leader node admin password. +* `REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: +``` +HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb +``` + +#### Command line variables + +* `--HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +* `--HDB_LEADER_USERNAME` - The leader node admin username. +* `--HDB_LEADER_PASSWORD` - The leader node admin password. +* `--REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: +``` +harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --REPLICATION_HOSTNAME node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ... +``` + +Each time clone is run, it will set the value `cloned: true` in `harperdb-config.yaml`. This value will prevent clone from +running again. If you want to run clone again, set this value to `false`. If Harper is started with the clone variables +still present and `cloned` is true, Harper will just start as normal. + +Clone node does not require any additional configuration apart from the variables referenced above. +However, if you wish to set any configuration during clone, this can be done by passing the config as environment/CLI +variables or by cloning over top of an existing `harperdb-config.yaml` file. + +More can be found in the Harper config documentation [here](../deployments/configuration). + +### Excluding databases and components + +To set any specific (optional) clone config, including the exclusion of any databases, tables, or components, there is a file +called `clone-node-config.yaml` that can be used. + +The file must be located in the `ROOTPATH` directory of your clone (the `hdb` directory where your clone will be installed; +if the directory does not exist, create one and add the file to it). + +The config available in `clone-node-config.yaml` is: + +```yaml +databaseConfig: + excludeDatabases: + - database: null + excludeTables: + - database: null + table: null +componentConfig: + exclude: + - name: null +``` + +_Note: only include the configuration that you are using. If no clone config file is provided, nothing will be excluded, +unless it already exists on the clone._ + +`databaseConfig` - Set any databases or tables that you wish to exclude from cloning. + +`componentConfig` - Set any components that you do not want cloned. Clone node will not clone the component code,
Clone node will not clone the component code, +it will only clone the component reference that exists in the leader harperdb-config file. + +### Cloning configuration + +Clone node will not clone any configuration that is classed as unique to the leader node. This includes `replication.hostname`, `replication.url`,`clustering.nodeName`, +`rootPath` and any other path related values, for example `storage.path`, `logging.root`, `componentsRoot`, +any authentication certificate/key paths. + +### Cloning system database + +Harper uses a database called `system` to store operational information. Clone node will only clone the user and role +tables from this database. It will also set up replication on this table, which means that any existing and future user and roles +that are added will be replicated throughout the cluster. + +Cloning the user and role tables means that once clone node is complete, the clone will share the same login credentials with +the leader. + +### Replication + +If clone is run with the `REPLICATION_HOSTNAME` variable set, a fully replicating clone will be created. + +If any databases are excluded from the clone, replication will not be set up on these databases. + +### JWT Keys + +If cloning with replication, the leader's JWT private and public keys will be cloned. To disable this, include `CLONE_KEYS=false` in your clone variables. + +### Cloning overtop of an existing Harper instance + +Clone node will not overwrite any existing config, database or replication. It will write/clone any config database or replication +that does not exist on the node it is running on. + +An example of how this can be useful is if you want to set Harper config before the clone is created. To do this you +would create a harperdb-config.yaml file in your local `hdb` root directory with the config you wish to set. Then +when clone is run it will append the missing config to the file and install Harper with the desired config. + +Another useful example could be retroactively adding another database to an existing instance. Running clone on +an existing instance could create a full clone of another database and set up replication between the database on the +leader and the clone. + +### Cloning steps + +Clone node will execute the following steps when ran: +1. Look for an existing Harper install. It does this by using the default (or user provided) `ROOTPATH`. +1. If an existing instance is found it will check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true` clone will skip the clone logic and start Harper. +1. Clone harperdb-config.yaml values that don't already exist (excluding values unique to the leader node). +1. Fully clone any databases that don't already exist. +1. If classed as a "fresh clone", install Harper. An instance is classed as a fresh clone if there is no system database. +1. If `REPLICATION_HOSTNAME` is set, set up replication between the leader and clone. +1. Clone is complete, start Harper. + +### Cloning with Docker + +To run clone inside a container add the environment variables to your run command. + +For example: + +``` +docker run -d \ + -v :/home/harperdb/hdb \ + -e HDB_LEADER_PASSWORD=password \ + -e HDB_LEADER_USERNAME=admin \ + -e HDB_LEADER_URL=https:/1.123.45.6:9925 \ + -e REPLICATION_HOSTNAME=1.123.45.6 \ + -p 9925:9925 \ + -p 9926:9926 \ + harperdb/harperdb +``` + +Clone will only run once, when you first start the container. If the container restarts the environment variables will be ignored. 
diff --git a/site/versioned_docs/version-4.4/administration/compact.md b/site/versioned_docs/version-4.4/administration/compact.md new file mode 100644 index 00000000..1a71db14 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/compact.md @@ -0,0 +1,60 @@ +--- +title: Compact +--- + +# Compact + +Database files can grow quickly as you use them, sometimes impeding performance. Harper has multiple compact features that can be used to reduce database file size and potentially improve performance. The compact process does not compress your data; instead, it makes your database file smaller by eliminating free space and fragmentation. + +There are two options that Harper offers for compacting a database. + +_Note: Some of the storage configuration (such as compression) cannot be updated on existing databases; this is where the following options are useful. They will create a new copy of the database with any updated configuration applied._ + +More information on the storage configuration options can be [found here](../deployments/configuration#storage). + +### Copy compaction + +To prevent any record loss, it is recommended that Harper not be running when performing this operation. + +This will copy a Harper database with compaction. If you wish to use this new database in place of the original, you will need to move/rename it to the path of the original database. + +This command should be run in the [CLI](../deployments/harper-cli): + +```bash +harperdb copy-db <database> <destination-path> +``` + +For example, to copy the default database: + +```bash +harperdb copy-db data /home/user/hdb/database/copy.mdb +``` + +### Compact on start + +Compact on start is a more automated option that will compact **all** databases when Harper is started. Harper will not start until compact is complete. Under the hood, it loops through all non-system databases, creates a backup of each one, and calls `copy-db`. After the copy/compaction is complete, it will move the new database to where the original one is located and remove any backups. + +Compact on start is initiated by config in `harperdb-config.yaml`. + +_Note: Compact on start will switch `compactOnStart` to `false` after it has run._ + +`compactOnStart` - _Type_: boolean; _Default_: false + +`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false + +```yaml +storage: + compactOnStart: true + compactOnStartKeepBackup: false +``` + +Using CLI variables: + +```bash +--STORAGE_COMPACTONSTART true --STORAGE_COMPACTONSTARTKEEPBACKUP true +``` + +Using environment variables: + +```bash +STORAGE_COMPACTONSTART=true +STORAGE_COMPACTONSTARTKEEPBACKUP=true +``` diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/create-account.md b/site/versioned_docs/version-4.4/administration/harper-studio/create-account.md new file mode 100644 index 00000000..fdc23cfb --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/create-account.md @@ -0,0 +1,26 @@ +--- +title: Create a Studio Account +--- + +# Create a Studio Account +Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up). + +1) Provide the following information: + * First Name + * Last Name + * Email Address + * Subdomain + + *Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* + * Coupon Code (optional) +2) Review the Privacy Policy and Terms of Service. +3) Click the **sign up for free** button.
+4) You will be taken to a new screen to add an account password. Enter your password. + *Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* +5) Click the **add account password** button. + +You will receive a Studio welcome email confirming your registration. + + + +Note: Your email address will be used as your username and cannot be changed. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/enable-mixed-content.md b/site/versioned_docs/version-4.4/administration/harper-studio/enable-mixed-content.md new file mode 100644 index 00000000..85b7f8a7 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/enable-mixed-content.md @@ -0,0 +1,11 @@ +--- +title: Enable Mixed Content +--- + +# Enable Mixed Content + +Enabling mixed content is required in cases where you would like to connect the Harper Studio to Harper instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio. + + + +A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html). \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/index.md b/site/versioned_docs/version-4.4/administration/harper-studio/index.md new file mode 100644 index 00000000..6db20847 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/index.md @@ -0,0 +1,17 @@ +--- +title: Harper Studio +--- + +# Harper Studio +Harper Studio is the web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface without any knowledge of the underlying Harper API. It’s free to sign up, so get started today! + +[Sign up for free!](https://studio.harperdb.io/sign-up) + +Harper now includes a simplified local Studio that is packaged with all Harper installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io). + +--- +## How does Studio work? +While Harper Studio is web-based and hosted by us, all database interactions are performed on the Harper instance the studio is connected to. The Harper Studio loads in your browser, at which point you log in to your Harper instances. Credentials are stored in your browser cache and are not transmitted back to Harper. All database interactions are made via the Harper Operations API directly from your browser to your instance. + +## What types of instances can I manage? +Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI. All Harper instances feature identical behavior whether they are hosted by us or by you.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/instance-configuration.md b/site/versioned_docs/version-4.4/administration/harper-studio/instance-configuration.md new file mode 100644 index 00000000..fe0d295f --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/instance-configuration.md @@ -0,0 +1,125 @@ +--- +title: Instance Configuration +--- + +# Instance Configuration + +Harper instance configuration can be viewed and managed directly through the Harper Studio. Harper Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. Enterprise instance licenses can be modified by adjusting the licensed RAM. + + + +All instance configuration is handled through the **config** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **config** in the instance control bar. + +*Note, the **config** page will only be available to super users, and certain items are restricted to Studio organization owners.* + +## Instance Overview + +The **instance overview** panel displays the following instance specifications: + +* Instance URL + +* Applications URL + +* Instance Node Name (for clustering) + +* Instance API Auth Header (this user) + + *The Basic authentication header used for the logged-in Harper database user.* + +* Created Date (Harper Cloud only) + +* Region (Harper Cloud only) + + *The geographic region where the instance is hosted.* + +* Total Price + +* RAM + +* Storage (Harper Cloud only) + +* Disk IOPS (Harper Cloud only) + +## Update Instance RAM + +Harper Cloud instance size and Enterprise instance licenses can be modified with the following instructions. This option is only available to Studio organization owners. + + + +Note: For Harper Cloud instances, upgrading RAM may add additional CPUs to your instance as well. Click here to see how many CPUs are provisioned for each instance size. + +1) In the **update ram** panel at the bottom left: + + * Select the new instance size. + + * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + + * If you do have a credit card associated, you will be presented with the updated billing information. + + * Click **Upgrade**. + +2) The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +*Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* + +## Update Instance Storage + +The Harper Cloud instance storage size can be increased with the following instructions. This option is only available to Studio organization owners. + +Note: Instance storage can only be upgraded once every 6 hours.
+ +1) In the **update storage** panel at the bottom left: + + * Select the new instance storage size. + + * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + + * If you do have a credit card associated, you will be presented with the updated billing information. + + * Click **Upgrade**. + +2) The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +*Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* + +## Remove Instance + +The Harper instance can be deleted/removed from the Studio with the following instructions. Once this operation is started, it cannot be undone. This option is only available to Studio organization owners. + +1) In the **remove instance** panel at the bottom left: + * Enter the instance name in the text box. + + * The Studio will present you with a warning. + + * Click **Remove**. + +2) The instance will begin deleting immediately. + +## Restart Instance + +The Harper Cloud instance can be restarted with the following instructions. + +1) In the **restart instance** panel at the bottom right: + * Enter the instance name in the text box. + + * The Studio will present you with a warning. + + * Click **Restart**. + +2) The instance will begin restarting immediately. + +## Instance Config (Read Only) + +A JSON preview of the instance config is available for reference at the bottom of the page. This is a read-only visual and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration#using-the-configuration-file-and-naming-conventions). \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/instance-metrics.md b/site/versioned_docs/version-4.4/administration/harper-studio/instance-metrics.md new file mode 100644 index 00000000..eae954f1 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/instance-metrics.md @@ -0,0 +1,16 @@ +--- +title: Instance Metrics +--- + +# Instance Metrics + +The Harper Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **status** in the instance control bar. + +Once on the instance status page, you can view host system information, [Harper logs](../logging/standard-logging), and Harper Cloud alarms (if it is a cloud instance).
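+ +Much of the host system information shown on the **status** page can also be retrieved directly from the operations API via the `system_information` operation. A minimal sketch of the request body: + +```json +{ + "operation": "system_information" +} +```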
+ +_Note, the **status** page will only be available to super users._ diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/instances.md b/site/versioned_docs/version-4.4/administration/harper-studio/instances.md new file mode 100644 index 00000000..f44fb609 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/instances.md @@ -0,0 +1,130 @@ +--- +title: Instances +--- + +# Instances + +The Harper Studio allows you to administer all of your Harper instances in one place. Harper currently offers the following instance types: + +* **Harper Cloud Instance** Managed installations of Harper, what we call [Harper Cloud](../../deployments/harper-cloud/). +* **5G Wavelength Instance** Managed installations of Harper running on the Verizon network through AWS Wavelength, what we call 5G Wavelength Instances. _Note, these instances are only accessible via the Verizon network._ +* **Enterprise Instance** Any Harper installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally. + +All interactions between the Studio and your instances take place directly from your browser. Harper stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the Harper instances using the standard [Harper API](../../developers/operations-api/). + +## Organization Instance List + +A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets its own card. Harper Cloud and Enterprise instances are listed together. + +## Create a New Instance + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select your desired Instance Type. +1. For a Harper Cloud Instance or a Harper 5G Wavelength Instance, click **Create Harper Cloud Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ + 1. Enter Instance Username + + _This is the username of the initial Harper instance super user._ + 1. Enter Instance Password + + _This is the password of the initial Harper instance super user._ + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _Harper Cloud Instances are billed based on Instance RAM; this selects the size of your provisioned instance._ _More on instance specs._ + 1. Select Storage Size + + _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS._ _More on IOPS Impact on Performance._ + 1. Select Instance Region + + _The geographic area where your instance will be provisioned._ + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1.
Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. Your Harper Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. + +## Register Enterprise Instance + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select **Register Enterprise Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This is used for descriptive purposes only._ + 1. Enter Instance Username + + _The username of a Harper super user that is already configured in your Harper installation._ + 1. Enter Instance Password + + _The password of a Harper super user that is already configured in your Harper installation._ + 1. Enter Host + + _The host to access the Harper instance. For example, `harperdb.myhost.com` or `localhost`._ + 1. Enter Port + + _The port to access the Harper instance. Harper defaults to `9925` for HTTP and `31283` for HTTPS._ + 1. Select SSL + + _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._ + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _Harper instances are billed based on Instance RAM. Selecting additional RAM enables faster and more complex queries._ + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. The Harper Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete. + +## Delete an Instance + +Instance deletion has two different behaviors depending on the instance type. + +* **Harper Cloud Instance** This instance will be permanently deleted, including all data. This process is irreversible and cannot be undone. +* **Enterprise Instance** The instance will be removed from the Harper Studio only. This does not uninstall Harper from your system and your data will remain intact. + +An instance can be deleted as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the trash can icon. +1. Enter the instance name into the text box. + + _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ +1. Click the **Do It** button. + +## Upgrade an Instance + +Harper instances can be resized on the [Instance Configuration](./instance-configuration) page.
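+ +Before registering an Enterprise instance (see Register Enterprise Instance above), it can help to confirm that the host, port, and credentials you plan to enter actually reach the instance. A hedged sketch using the operations API directly (the host, port, and credentials are placeholders, and the `user_info` operation is assumed to be available to the configured super user): + +```bash +# Should echo back the configured user if host, port, and credentials are correct. +curl -X POST https://harperdb.myhost.com:9925 -u username:password -H 'Content-Type: application/json' -d '{"operation": "user_info"}' +```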
+ +## Instance Log In/Log Out + +The Studio enables you to log in to and out of an instance as different database users from the instance control panel. To log out of an instance: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the lock icon. +1. You will immediately be logged out of the instance. + +To log in to an instance: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card (it will have an unlocked icon and a status reading PLEASE LOG IN) and click the center of the card. +1. Enter the database username. + + _The username of a Harper user that is already configured in your Harper instance._ +1. Enter the database password. + + _The password of a Harper user that is already configured in your Harper instance._ +1. Click **Log In**. diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/login-password-reset.md b/site/versioned_docs/version-4.4/administration/harper-studio/login-password-reset.md new file mode 100644 index 00000000..2d1e7eac --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/login-password-reset.md @@ -0,0 +1,42 @@ +--- +title: Login and Password Reset +--- + +# Login and Password Reset + +## Log In to Your Harper Studio Account + +To log into your existing Harper Studio account: + +1) Navigate to the [Harper Studio](https://studio.harperdb.io/). +2) Enter your email address. +3) Enter your password. +4) Click **sign in**. + +## Reset a Forgotten Password + +To reset a forgotten password: + +1) Navigate to the Harper Studio password reset page. +2) Enter your email address. +3) Click **send password reset email**. +4) If the account exists, you will receive an email with a temporary password. +5) Navigate back to the Harper Studio login page. +6) Enter your email address. +7) Enter your temporary password. +8) Click **sign in**. +9) You will be taken to a new screen to reset your account password. Enter your new password. +*Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* +10) Click the **add account password** button. + +## Change Your Password + +If you are already logged into the Studio, you can change your password through the user interface. + +1) Navigate to the Harper Studio profile page. +2) In the **password** section, enter: + + * Current password. + * New password. + * New password again *(for verification)*. +3) Click the **Update Password** button. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/manage-applications.md b/site/versioned_docs/version-4.4/administration/harper-studio/manage-applications.md new file mode 100644 index 00000000..16974445 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/manage-applications.md @@ -0,0 +1,59 @@ +--- +title: Manage Applications +--- + +# Manage Applications + +[Harper Applications](../../developers/applications/) are enabled by default and can be configured further through the Harper Studio. It is recommended to read through the [Applications](../../developers/applications/) documentation first to gain a strong understanding of Harper Applications behavior.
+ +All Applications configuration and development is handled through the **applications** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the Harper Studio Organizations page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **applications** in the instance control bar. + +_Note, the **applications** page will only be available to super users._ + +## Manage Applications + +The Applications editor is not required for development and deployment, though it is a useful tool to maintain and manage your Harper Applications. The editor provides the ability to create new applications or import/deploy remote application packages. + +The left bar is the applications file navigator, allowing you to select files to edit and add/remove files and folders. By default, this view is empty because there are no existing applications. To get started, either create a new application or import/deploy a remote application. + +The right side of the screen is the file editor. Here you can edit individual files of your application directly in the Harper Studio. + +## Things to Keep in Mind + +To learn more about developing Harper Applications, make sure to read through the [Applications](../../developers/applications/) documentation. + +When working with Applications in the Harper Studio, by default the editor will restart the Harper Applications server every time a file is saved. Note, this behavior can be turned off via the `auto` toggle at the top right of the applications page. If you are constantly editing your application, it may result in errors causing the application not to run. These errors will not be visible on the application page; however, they will be available in the Harper logs, which can be found on the [status page](./instance-metrics). + +The Applications editor stores unsaved changes in cache. This means that occasionally your editor will show a discrepancy from the code that is stored and running on your Harper instance. You can identify if the code in your Studio differs if the "save" and "revert" buttons are active. To revert the cached version in your editor to the version of the file stored on your Harper instance, click the "revert" button. + +## Accessing Your Application Endpoints + +Accessing your application endpoints varies depending on which type of endpoint you're creating. All endpoints, regardless of type, will be accessed via the [Harper HTTP port found in the Harper configuration file](../../deployments/configuration#http). The default port is `9926`, but you can verify what your instance is set to by navigating to the [instance config page](./instance-configuration) and examining the read-only JSON version of your instance's config file, looking specifically for either the `http: port: 9926` or `http: securePort: 9926` configs. If `port` is set, you will access your endpoints via `http`, and if `securePort` is set, you will access your endpoints via `https`. + +Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`. + +* **Standard REST Endpoints**\ + Standard REST endpoints are defined via the `@export` directive on tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint).
Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest), we could send a `GET` (or, since this is a GET, simply enter the URL in our browser) to `https://localhost:9926/Dog/1`. +* **Augmented REST Endpoints**\ + Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`. +* **Fastify Routes**\ + If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost:9926/application-template/getAll`. + +## Creating a New Application + +1. From the application page, click the "+ app" button at the top right. +1. Click "+ Create A New Application Using The Default Template". +1. Enter a name for your project; note that project names must contain only alphanumeric characters, dashes, and underscores. +1. Click OK. +1. Your project will be available in the applications file navigator on the left. Click a file to select it for editing. + +## Editing an Application + +1. From the applications page, click the file you would like to edit from the file navigator on the left. +1. Edit the file with any changes you'd like. +1. Click "save" at the top right. Note, as mentioned above, when you save a file, the Harper Applications server will be restarted immediately. diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/manage-databases-browse-data.md b/site/versioned_docs/version-4.4/administration/harper-studio/manage-databases-browse-data.md new file mode 100644 index 00000000..88c16a6c --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/manage-databases-browse-data.md @@ -0,0 +1,132 @@ +--- +title: Manage Databases / Browse Data +--- + +# Manage Databases / Browse Data + +Manage instance databases/tables and browse data in tabular format with the following instructions: + +1) Navigate to the Harper Studio Organizations page. +2) Click the appropriate organization that the instance belongs to. +3) Select your desired instance. +4) Click **browse** in the instance control bar. + +Once on the instance browse page, you can view data, manage databases and tables, add new data, and more. + +## Manage Databases and Tables + +#### Create a Database + +1) Click the plus icon at the top right of the databases section. +2) Enter the database name. +3) Click the green check mark.
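+ +The Studio performs these steps through the standard operations API, so database creation can also be scripted. A minimal sketch (the `create_database` operation body and the `dev` name here are illustrative, not Studio output): + +```json +{ + "operation": "create_database", + "database": "dev" +} +```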
+ + +#### Delete a Database + +Deleting a database is permanent and irreversible. Deleting a database removes all tables and data within it. + +1) Click the minus icon at the top right of the databases section. +2) Identify the appropriate database to delete and click the red minus sign in the same row. +3) Click the red check mark to confirm deletion. + + +#### Create a Table + +1) Select the desired database from the databases section. +2) Click the plus icon at the top right of the tables section. +3) Enter the table name. +4) Enter the primary key. + + *The primary key is also often referred to as the hash attribute in the Studio, and it defines the unique identifier for each row in your table.* +5) Click the green check mark. + + +#### Delete a Table +Deleting a table is permanent and irreversible. Deleting a table removes all data within it. + +1) Select the desired database from the databases section. +2) Click the minus icon at the top right of the tables section. +3) Identify the appropriate table to delete and click the red minus sign in the same row. +4) Click the red check mark to confirm deletion. + +## Manage Table Data + +The following section assumes you have selected the appropriate table from the database/table browser. + + + +#### Filter Table Data + +1) Click the magnifying glass icon at the top right of the table browser. This expands the search filters. +2) Enter your search criteria in the expanded filters. +3) The results will be filtered accordingly. + + +#### Load CSV Data + +1) Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. +2) To import a CSV by URL: + 1) Enter the URL in the **CSV file URL** textbox. + 2) Click **Import From URL**. + 3) The CSV will load, and you will be redirected back to browse table data. +3) To upload a CSV file: + 1) Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). + 2) Navigate to your desired CSV file and select it. + 3) Click **Insert X Records**, where X is the number of records in your CSV. + 4) The CSV will load, and you will be redirected back to browse table data. + + +#### Add a Record + +1) Click the plus icon at the top right of the table browser. +2) The Studio will pre-populate existing table attributes in JSON format. + + *The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction.* +3) Enter values to be added to the record. + + *You may add new attributes to the JSON; they will be reflexively added to the table.* +4) Click the **Add New** button. + + +#### Edit a Record + +1) Click the record/row you would like to edit. +2) Modify the desired values. + + *You may add new attributes to the JSON; they will be reflexively added to the table.* + +3) Click the **save icon**. + + +#### Delete a Record + +Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted. + +1) Click the record/row you would like to delete. +2) Click the **delete icon**. +3) Confirm deletion by clicking the **check icon**. + +## Browse Table Data + +The following section assumes you have selected the appropriate table from the database/table browser. + +#### Browse Table Data + +The first page of table data is automatically loaded on table selection.
Paging controls are at the bottom of the table. Here you can: + +* Page left and right using the arrows. +* Type in the desired page. +* Change the page size (the number of records displayed in the table). + + +#### Refresh Table Data + +Click the refresh icon at the top right of the table browser. + + + +#### Automatically Refresh Table Data + +Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. + diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/manage-instance-roles.md b/site/versioned_docs/version-4.4/administration/harper-studio/manage-instance-roles.md new file mode 100644 index 00000000..d0f8c82d --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/manage-instance-roles.md @@ -0,0 +1,76 @@ +--- +title: Manage Instance Roles +--- + +# Manage Instance Roles + +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. + +Instance role configuration is handled through the **roles** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the Harper Studio Organizations page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **roles** in the instance control bar. + +*Note, the **roles** page will only be available to super users.* + + + +The *roles management* screen consists of the following panels: + +* **super users** + + Displays all super user roles for this instance. +* **cluster users** + + Displays all cluster user roles for this instance. +* **standard roles** + + Displays all standard roles for this instance. +* **role permission editing** + + Once a role is selected for editing, permissions will be displayed here in JSON format. + +*Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`.* + +## Role Management + +#### Create a Role + +1) Click the plus icon at the top right of the appropriate role section. + +2) Enter the role name. + +3) Click the green check mark. + +4) Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. + +5) Configure the role permissions in the role permission editing panel. + + *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + +6) Click **Update Role Permissions**. + +#### Modify a Role + +1) Click the appropriate role from the appropriate role section. + +2) Modify the role permissions in the role permission editing panel. + + *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + +3) Click **Update Role Permissions**. + +#### Delete a Role + +Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it. + +1) Click the minus icon at the top right of the roles section. + +2) Identify the appropriate role to delete and click the red minus sign in the same row. + +3) Click the red check mark to confirm deletion.
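+ +For reference, the permissions JSON edited in the role permission editing panel generally takes the shape below. This is a minimal sketch for a hypothetical `dev.dog` table, not output copied from the Studio: + +```json +{ + "super_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [] + } + } + } +} +```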
\ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/manage-instance-users.md b/site/versioned_docs/version-4.4/administration/harper-studio/manage-instance-users.md new file mode 100644 index 00000000..a99ae4c6 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/manage-instance-users.md @@ -0,0 +1,61 @@ +--- +title: Manage Instance Users +--- + +# Manage Instance Users + +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. + +Instance user configuration is handled through the **users** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **users** in the instance control bar. + +*Note, the **users** page will only be available to super users.* + +## Add a User + +Harper instance users can be added with the following instructions. + +1) In the **add user** panel on the left, enter: + + * New user username. + + * New user password. + + * Select a role. + + *Learn more about role management here: [Manage Instance Roles](./manage-instance-roles).* + +2) Click **Add User**. + +## Edit a User + +Harper instance users can be modified with the following instructions. + +1) In the **existing users** panel, click the row of the user you would like to edit. + +2) To change a user’s password: + + 1) In the **Change user password** section, enter the new password. + + 2) Click **Update Password**. + +3) To change a user’s role: + + 1) In the **Change user role** section, select the new role. + + 2) Click **Update Role**. + +4) To delete a user: + + 1) In the **Delete User** section, type the username into the textbox. + + *This is done for confirmation purposes.* + + 2) Click **Delete User**. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/manage-replication.md b/site/versioned_docs/version-4.4/administration/harper-studio/manage-replication.md new file mode 100644 index 00000000..78a457a9 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/manage-replication.md @@ -0,0 +1,89 @@ +--- +title: Manage Replication +--- + +# Manage Replication + +Harper instance clustering and replication can be configured directly through the Harper Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/) first to gain a strong understanding of Harper clustering behavior. + + + +All clustering configuration is handled through the **replication** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **replication** in the instance control bar. + +*Note, the **replication** page will only be available to super users.* + +--- +## Initial Configuration + +Harper instances do not have clustering configured by default. The Harper Studio will walk you through the initial configuration.
Upon entering the **replication** screen for the first time, you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user) document before proceeding. + +1) Enter the Cluster User username (defaults to `cluster_user`). +2) Enter the Cluster Password. +3) Review and/or set the Cluster Node Name. +4) Click **Enable Clustering**. + +At this point the Studio will restart your Harper instance, which is required for the configuration changes to take effect. + +--- + +## Manage Clustering +Once initial clustering configuration is completed, you are presented with a clustering management screen with the following properties: + +* **connected instances** + + Displays all instances within the Studio Organization that this instance manages a connection with. + +* **unconnected instances** + + Displays all instances within the Studio Organization that this instance does not manage a connection with. + +* **unregistered instances** + + Displays all instances outside the Studio Organization that this instance manages a connection with. + +* **manage clustering** + + Once instances are connected, this will display clustering management options for all connected instances and all databases and tables. +--- + +## Connect an Instance + +Harper Instances can be clustered together with the following instructions. + +1) Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both. + +2) Identify the instance you would like to connect from the **unconnected instances** panel. + +3) Click the plus icon next to the appropriate instance. + +4) If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue. + +--- + +## Disconnect an Instance + +Harper Instances can be disconnected with the following instructions. + +1) Identify the instance you would like to disconnect from the **connected instances** panel. + +2) Click the minus icon next to the appropriate instance. + +--- + +## Manage Replication + +Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions: + +1) Identify the instance, database, and table for replication to be configured. + +2) For publish, click the toggle switch in the **publish** column. + +3) For subscribe, click the toggle switch in the **subscribe** column. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/harper-studio/organizations.md b/site/versioned_docs/version-4.4/administration/harper-studio/organizations.md new file mode 100644 index 00000000..fede2cd8 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/harper-studio/organizations.md @@ -0,0 +1,105 @@ +--- +title: Organizations +--- + +# Organizations +Harper Studio organizations provide the ability to group Harper Cloud Instances.
Organization behavior is as follows: + +* Billing occurs at the organization level to a single credit card. +* Organizations retain their own unique Harper Cloud subdomain. +* Cloud instances reside within an organization. +* Studio users can be invited to organizations to share instances. + + +An organization is automatically created for you when you sign up for Harper Studio. If you only have one organization, the Studio will automatically bring you to your organization’s page. + +--- + +## List Organizations +A summary view of all organizations your user belongs to can be viewed on the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the Harper Studio. + +## Create a New Organization +A new organization can be created as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the **Create a New Organization** card. +3) Fill out the new organization details: + * Enter Organization Name + *This is used for descriptive purposes only.* + * Enter Organization Subdomain + *Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* +4) Click **Create Organization**. + +## Delete an Organization +An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows: + +1) Navigate to the Harper Studio Organizations page. +2) Identify the proper organization card and click the trash can icon. +3) Enter the organization name into the text box. + + *This is done for confirmation purposes to ensure you do not accidentally delete an organization.* +4) Click the **Do It** button. + +## Manage Users +Harper Studio organization owners can manage users, including inviting new users, removing users, and toggling ownership. + + + +#### Inviting a User +A new user can be invited to an organization as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) In the **add user** box, enter the new user’s email address. +5) Click **Add User**. + +Users may or may not already be Harper Studio users when adding them to an organization. If the Harper Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a Harper Studio account, they will receive an email welcoming them to Harper Studio. + +--- + +#### Toggle a User’s Organization Owner Status +Organization owners have full access to the organization including the ability to manage organization users, create, modify, and delete instances, and delete the organization. Users must have accepted their invitation prior to being promoted to an owner. A user’s organization owner status can be toggled as follows: + +1) Navigate to the Harper Studio Organizations page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) Click the appropriate user from the **existing users** section. +5) Toggle the **Is Owner** switch to the desired status. +--- + +#### Remove a User from an Organization +Users may be removed from an organization at any time.
Removing a user from an organization will not delete their Harper Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) Click the appropriate user from the **existing users** section. +5) Type **DELETE** in the text box in the **Delete User** row. + + *This is done for confirmation purposes to ensure you do not accidentally delete a user.* +6) Click **Delete User**. + +## Manage Billing + +Billing is configured per organization, and charges are billed to the stored credit card at appropriate intervals (monthly or annually depending on the registered instance). Billing settings can be configured as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **billing** at the top of the screen. + +Here organization owners can view invoices, manage coupons, and manage the associated credit card. + + + +*Harper billing and payments are managed via Stripe.* + + + +### Add a Coupon + +Coupons are applicable towards any paid tier or enterprise instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows: + +1) In the coupons panel of the **billing** page, enter your coupon code. +2) Click **Add Coupon**. +3) The coupon will then be available and displayed in the coupons panel. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/administration/jobs.md b/site/versioned_docs/version-4.4/administration/jobs.md new file mode 100644 index 00000000..e71dd9cf --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/jobs.md @@ -0,0 +1,112 @@ +--- +title: Jobs +--- + +# Jobs + +Harper Jobs are asynchronous tasks performed by the Operations API. + +## Job Summary + +Jobs use an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so the job is started and an ID is provided to check on its status. + +The job status can be **COMPLETE** or **IN\_PROGRESS**. + +## Example Job Operations + +Example job operations include: + +[csv data load](../developers/operations-api/bulk-operations#csv-data-load) + +[csv file load](../developers/operations-api/bulk-operations#csv-file-load) + +[csv url load](../developers/operations-api/bulk-operations#csv-url-load) + +[import from s3](../developers/operations-api/bulk-operations#import-from-s3) + +[delete_records_before](../developers/operations-api/utilities#delete-records-before) + +[export_local](../developers/operations-api/utilities#export-local) + +[export_to_s3](../developers/operations-api/utilities#export-to-s3) + +Example Response from a Job Operation + +``` +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` + +Whenever one of these operations is initiated, an asynchronous job is created and the response contains the ID of that job, which can be used to check on its status. + +## Managing Jobs + +To check on a job's status, use the [get_job](../developers/operations-api/jobs#get-job) operation.
+ +Get Job Request + +``` +{ + "operation": "get_job", + "id": "4a982782-929a-4507-8794-26dae1132def" +} +``` + +Get Job Response + +``` +[ + { + "__createdtime__": 1611615798782, + "__updatedtime__": 1611615801207, + "created_datetime": 1611615798774, + "end_datetime": 1611615801206, + "id": "4a982782-929a-4507-8794-26dae1132def", + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "start_datetime": 1611615798805, + "status": "COMPLETE", + "type": "csv_url_load", + "user": "HDB_ADMIN", + "start_datetime_converted": "2021-01-25T23:03:18.805Z", + "end_datetime_converted": "2021-01-25T23:03:21.206Z" + } +] +``` + +## Finding Jobs + +To find jobs (if the ID is not known), use the [search_jobs_by_start_date](../developers/operations-api/jobs#search-jobs-by-start-date) operation. + +Search Jobs Request + +``` +{ + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" +} +``` + +Search Jobs Response + +``` +[ + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } +] +``` diff --git a/site/versioned_docs/version-4.4/administration/logging/audit-logging.md b/site/versioned_docs/version-4.4/administration/logging/audit-logging.md new file mode 100644 index 00000000..cfec1062 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/logging/audit-logging.md @@ -0,0 +1,135 @@ +--- +title: Audit Logging +--- + +# Audit Logging + +### Audit log + +The audit log uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. + +The audit log is enabled by default. To disable the audit log, set `logging.auditLog` to false in the config file, `harperdb-config.yaml`. Then restart Harper for those changes to take effect. Note, the audit log must be enabled for real-time messaging. + +### Audit Log Operations + +#### read\_audit\_log + +The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [Harper API documentation](../../developers/operations-api/logs). + +**Search by Timestamp** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558 + ] +} +``` + +There are three outcomes when using timestamps: + +* `"search_values": []` - All records returned for the specified table +* `"search_values": [1660585740558]` - All records after the provided timestamp +* `"search_values": [1660585740558, 1760585759710]` - Records between the provided "from" and "to" timestamps + +*** + +**Search by Username** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +The above example will return all records whose `username` is "admin."
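+ +Because `search_values` is an array, it should also be possible to filter on several usernames in one query. A hedged example (multi-value username searches are an assumption based on the timestamp examples above): + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin", + "HDB_ADMIN" + ] +} +```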
+ +*** + +**Search by Primary Key** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +The above example will return all records whose primary key (`hash_value`) is 318. + +*** + +#### read\_audit\_log Response + +The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation gives you the `original_records`. + +```json +{ + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [ + 1, + 2 + ], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] +} +``` + +#### delete\_audit\_logs\_before + +Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 +} +``` diff --git a/site/versioned_docs/version-4.4/administration/logging/index.md b/site/versioned_docs/version-4.4/administration/logging/index.md new file mode 100644 index 00000000..7a9588ce --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/logging/index.md @@ -0,0 +1,11 @@ +--- +title: Logging +--- + +# Logging + +Harper provides many different logging options for various features and functionality. + +* [Standard Logging](./standard-logging): Harper maintains a log of events that take place throughout operation. +* [Audit Logging](./audit-logging): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +* [Transaction Logging](./transaction-logging): Harper stores a verbose history of all transactions logged for specified database tables, including original data records. diff --git a/site/versioned_docs/version-4.4/administration/logging/standard-logging.md b/site/versioned_docs/version-4.4/administration/logging/standard-logging.md new file mode 100644 index 00000000..7194fff4 --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/logging/standard-logging.md @@ -0,0 +1,65 @@ +--- +title: Standard Logging +--- + +# Standard Logging + +Harper maintains a log of events that take place throughout operation. Log messages can be used for diagnostic purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory, `/log/hdb.log`. The install log is located in the Harper application directory, most likely located in your npm directory, `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: + +``` +<timestamp> [<level>] [<thread>/<id>] ...[<tag>]: <message> +``` + +For example, a typical log entry looks like: + +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started.
+``` + +The components of a log entry are: + +* timestamp - This is the date/time stamp when the event occurred. +* level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels in order of least urgent (and more verbose) are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +* thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread ID for them since they are a separate process. Key threads are: + * main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads. + * http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + * Clustering\* - These are threads and processes that handle replication. + * job - These are job threads that have been started to handle operations that are executed in a separate job thread. +* tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +* message - This is the main message that was reported. + +We try to keep logging to a minimum by default; to do this, the default log level is `error`. If you require more information from the logs, turning the log level down (toward `trace`) will provide that. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +Harper clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of Harper, and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers, there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels from least verbose are `error`, `warn`, `info`, `debug`, and `trace`. + +## Log File vs Standard Streams + +Harper logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. To log to standard streams effectively, make sure to directly run `harperdb` and don't start it as a separate process (don't use `harperdb start`), and `logging.stdStreams` must be set to true. Note, logging to standard streams only will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This allows for organized storage and efficient use of disk space. For more information, see “logging” in our [config docs](../../deployments/configuration). + +## Read Logs via the API + +To access specific logs, you may query the Harper API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria.
+ +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` diff --git a/site/versioned_docs/version-4.4/administration/logging/transaction-logging.md b/site/versioned_docs/version-4.4/administration/logging/transaction-logging.md new file mode 100644 index 00000000..48860fdd --- /dev/null +++ b/site/versioned_docs/version-4.4/administration/logging/transaction-logging.md @@ -0,0 +1,87 @@ +--- +title: Transaction Logging +--- + +# Transaction Logging + +Harper offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. + +## Transaction log + +The first option is `read_transaction_log`. The transaction log is built upon clustering streams. Clustering streams are per-table message stores that enable data to be propagated across a cluster. Harper leverages streams for use with the transaction log. When clustering is enabled, all transactions that occur against a table are pushed to its stream, and thus make up the transaction log. + +If you would like to use the transaction log, but have not set up clustering yet, please see ["How to Cluster"](../../developers/clustering/). + +## Transaction Log Operations + +### read\_transaction\_log + +The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided. + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 +} +``` + +_See example response below._ + +### read\_transaction\_log Response + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +_See example request above._ + +### delete\_transaction\_logs\_before + +The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +_Note: Streams are used for catchup if a node goes down. If you delete messages from a stream, there is a chance catchup won't work._ + +Read on for `read_audit_log`, the second option, for logging transactions executed against a table.
diff --git a/site/versioned_docs/version-4.4/deployments/_category_.json b/site/versioned_docs/version-4.4/deployments/_category_.json
new file mode 100644
index 00000000..8fdd6e17
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Deployments",
+  "position": 3,
+  "link": {
+    "type": "generated-index",
+    "title": "Deployments Documentation",
+    "description": "Installation and deployment guides for HarperDB",
+    "keywords": [
+      "deployments"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/deployments/configuration.md b/site/versioned_docs/version-4.4/deployments/configuration.md
new file mode 100644
index 00000000..5aeb3f8a
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/configuration.md
@@ -0,0 +1,1072 @@
+---
+title: Configuration File
+---
+
+# Configuration File
+
+Harper is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the Harper root directory (by default this is a directory named `hdb` located in the home directory of the current user).
+
+Some configuration will be populated by default in the config file on install, regardless of whether it is used.
+
+***
+
+## Using the Configuration File and Naming Conventions
+
+The configuration elements in `harperdb-config.yaml` use camelCase: `operationsApi`.
+
+To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. Harper must be restarted for changes to take effect.
+
+Alternatively, configuration can be changed via environment variables, command line variables, or the API. To access lower level elements, use underscores to join parent/child elements (when used this way, elements are case insensitive):
+
+- Environment variables: `OPERATIONSAPI_NETWORK_PORT=9925`
+- Command line variables: `--OPERATIONSAPI_NETWORK_PORT 9925`
+- Calling `set_configuration` through the API: `operationsApi_network_port: 9925`
+
+_Note: Component configuration cannot be added or updated via CLI or ENV variables._
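+
+For instance, the same port change made through the API might look like the following sketch of a `set_configuration` request (as with file edits, a restart is required for the change to take effect):
+
+```json
+{
+  "operation": "set_configuration",
+  "operationsApi_network_port": 9925
+}
+```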
+
+## Importing installation configuration
+
+To use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your custom configuration file.
+
+To install Harper on top of an existing configuration file, set `HDB_CONFIG` to the configuration file in the root path of your install: `<ROOTPATH>/harperdb-config.yaml`
+
+***
+
+## Configuration Options
+
+### `http`
+
+`sessionAffinity` - _Type_: string; _Default_: null
+
+Harper is a multi-threaded server designed to scale to utilize many CPU cores with high concurrency. Session affinity can help improve the efficiency and fairness of thread utilization by routing multiple requests from the same client to the same thread. This provides a fairer method of request handling by keeping a single user contained to a single thread, can improve caching locality (multiple requests from a single user are more likely to access the same data), and can provide the ability to share information in-memory in user sessions. Enabling session affinity will cause subsequent requests from the same client to be routed to the same thread.
+
+To enable `sessionAffinity`, you need to specify how clients will be identified from the incoming requests. If you are using Harper to directly serve HTTP requests from users at different remote addresses, you can use a setting of `ip`. However, if you are using Harper behind a proxy server or application server, all the remote IP addresses will be the same and Harper will effectively only run on a single thread. Alternatively, you can specify a header to use for identification. If you are using basic authentication, you could use the "Authorization" header to route requests to threads by the user's credentials. If you have another header that uniquely identifies users/clients, you can use that as the value of `sessionAffinity`. Be careful to ensure that the value provides sufficient uniqueness and that requests are effectively distributed to all the threads, fully utilizing all your CPU cores.
+
+```yaml
+http:
+  sessionAffinity: ip
+```
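+
+If clients are instead identified by a header, a sketch of the header-based form described above (assuming the header name is supplied directly as the value):
+
+```yaml
+http:
+  sessionAffinity: Authorization
+```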
+
+`compressionThreshold` - _Type_: number; _Default_: 1200 (bytes)
+
+For HTTP clients that support (Brotli) compression encoding, responses larger than this threshold will be compressed (note also that for clients that accept compression, any streaming responses from queries are compressed as well, since the size is not known beforehand).
+
+```yaml
+http:
+  compressionThreshold: 1200
+```
+
+`cors` - _Type_: boolean; _Default_: true
+
+Enable Cross Origin Resource Sharing, which allows requests across a domain.
+
+`corsAccessList` - _Type_: array; _Default_: null
+
+An array of allowable domains with CORS.
+
+`corsAccessControlAllowHeaders` - _Type_: string; _Default_: 'Accept, Content-Type, Authorization'
+
+A string representation of a comma separated list of header keys for the [Access-Control-Allow-Headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers) header for OPTIONS requests.
+
+`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute)
+
+Limit the amount of time the parser will wait to receive the complete HTTP headers.
+
+`maxHeaderSize` - _Type_: integer; _Default_: 16394
+
+The maximum allowed size of HTTP headers in bytes.
+
+`keepAliveTimeout` - _Type_: integer; _Default_: 30,000 milliseconds (30 seconds)
+
+Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response.
+
+`port` - _Type_: integer; _Default_: 9926
+
+The port used to access the component server.
+
+`securePort` - _Type_: integer; _Default_: null
+
+The port the Harper component server uses for HTTPS connections. This requires a valid certificate and key.
+
+`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes)
+
+The length of time in milliseconds after which a request will time out.
+
+```yaml
+http:
+  cors: true
+  corsAccessList:
+    - null
+  headersTimeout: 60000
+  maxHeaderSize: 8192
+  https: false
+  keepAliveTimeout: 30000
+  port: 9926
+  securePort: null
+  timeout: 120000
+```
+
+`mtls` - _Type_: boolean | object; _Default_: false
+
+This can be configured to enable mTLS based authentication for incoming connections. If enabled with default options (by setting it to `true`), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default.
+
+You can also define specific mTLS options by specifying an object for `mtls` with the following (optional) properties:
+
+`user` - _Type_: string; _Default_: Common Name
+
+This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication.
+
+`required` - _Type_: boolean; _Default_: false
+
+This can be enabled to require client certificates (mTLS) for all incoming HTTP connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication.
+
+```yaml
+http:
+  mtls: true
+```
+
+or
+
+```yaml
+http:
+  mtls:
+    required: true
+    user: user-name
+```
+
+***
+
+### `threads`
+
+The `threads` section provides control over how many threads are used, how much heap memory they may use, and debugging of the threads:
+
+`count` - _Type_: number; _Default_: One less than the number of logical cores/processors
+
+The `threads.count` option specifies the number of threads that will be used to service the HTTP requests for the operations API and custom functions. Generally, this should be close to the number of CPU logical cores/processors to ensure the CPU is fully utilized (a little less because Harper does have other threads at work), assuming Harper is the main service on a server.
+
+```yaml
+threads:
+  count: 11
+```
+
+`debug` - _Type_: boolean | object; _Default_: false
+
+This enables debugging. If simply set to true, this will enable debugging on the main thread on port 9229 with the 127.0.0.1 host interface. This can also be an object for more debugging control:
+
+`debug.port` - The port to use for debugging the main thread
+`debug.startingPort` - This will set up a separate port for debugging each thread. This is necessary for debugging individual threads with devtools.
+`debug.host` - Specify the host interface to listen on
+`debug.waitForDebugger` - Wait for the debugger before starting
+
+```yaml
+threads:
+  debug:
+    port: 9249
+```
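+
+A fuller sketch of the object form, using the option names listed above (the port and host values are illustrative):
+
+```yaml
+threads:
+  debug:
+    startingPort: 9230 # a separate debug port is set up per thread
+    host: 127.0.0.1
+    waitForDebugger: false
+```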
+
+`maxHeapMemory` - _Type_: number;
+
+```yaml
+threads:
+  maxHeapMemory: 300
+```
+
+This specifies the heap memory limit for each thread, in megabytes. The default heap limit is a heuristic based on available memory and thread count.
+
+***
+
+### `replication`
+
+The `replication` section configures [Harper replication](../developers/replication/), which is used to create Harper clusters and replicate data between the instances.
+
+```yaml
+replication:
+  hostname: server-one
+  url: wss://server-one:9925
+  databases: "*"
+  routes:
+    - wss://server-two:9925
+  port: null
+  securePort: 9925
+  enableRootCAs: true
+```
+
+`hostname` - _Type_: string;
+
+The hostname of the current Harper instance.
+
+`url` - _Type_: string;
+
+The URL of the current Harper instance.
+
+`databases` - _Type_: string/array; _Default_: "\*" (all databases)
+
+Configure which databases to replicate. This can be a string for all databases or an array of specific databases.
+
+```yaml
+replication:
+  databases:
+    - db1
+    - db2
+```
+
+`routes` - _Type_: array;
+
+An array of routes to connect to other nodes. Each element in the array can be either a string or an object with `hostname`, `port`, and optionally `startTime` properties.
+
+`startTime` - _Type_: string; ISO formatted UTC date string.
+
+Replication will attempt to catch up on all remote data upon setup. To start replication from a specific date, set this property.
+
+```yaml
+replication:
+  copyTablesToCatchUp: true
+  hostname: server-one
+  routes:
+    - wss://server-two:9925 # URL based route
+    - hostname: server-three # define a hostname and port
+      port: 9930
+      startTime: 2024-02-06T15:30:00Z
+```
+
+`port` - _Type_: integer; _Default_: 9925 (the operations API port `operationsApi.port`)
+
+The port to use for replication connections.
+
+`securePort` - _Type_: integer;
+
+The port to use for secure replication connections.
+
+`enableRootCAs` - _Type_: boolean; _Default_: true
+
+When true, Harper will verify certificates against the Node.js bundled CA store. The bundled CA store is a snapshot of the Mozilla CA store that is fixed at release time.
+
+`copyTablesToCatchUp` - _Type_: boolean; _Default_: true
+
+Replication will first attempt to catch up using the audit log. If unsuccessful, it will perform a full table copy. When set to `false`, replication will only use the audit log.
+
+***
+
+### `clustering` using NATS
+
+The `clustering` section configures the NATS clustering engine, which is used to replicate data between instances of Harper.
+
+_Note: There are two ways to create clusters and replicate data in Harper. One option is to use native Harper replication over WebSockets. The other option is to use_ [_NATS_](https://nats.io/about/) _to facilitate the cluster._
+
+Clustering offers a lot of different configuration options; however, in the majority of cases the only options you will need to pay attention to are:
+
+* `clustering.enabled` Enable the clustering processes.
+* `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes.
+* `clustering.hubServer.cluster.network.routes` The connections to other instances.
+* `clustering.nodeName` The name of your node; it must be unique within the cluster.
+* `clustering.user` The name of the user credentials used for inter-node authentication.
+
+`enabled` - _Type_: boolean; _Default_: false
+
+Enable clustering.
+
+_Note: If you enable clustering but do not create and add a cluster user, you will get a validation error. See the `user` description below on how to add a cluster user._
+
+```yaml
+clustering:
+  enabled: true
+```
+
+`clustering.hubServer.cluster`
+
+Clustering’s `hubServer` facilitates the Harper mesh network and discovery service.
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      name: harperdb
+      network:
+        port: 9932
+        routes:
+          - host: 3.62.184.22
+            port: 9932
+          - host: 3.735.184.8
+            port: 9932
+```
+
+`name` - _Type_: string, _Default_: harperdb
+
+The name of your cluster. This name needs to be consistent for all other nodes intended to be meshed in the same network.
+
+`port` - _Type_: integer, _Default_: 9932
+
+The port the hub server uses to accept cluster connections.
+
+`routes` - _Type_: array, _Default_: null
+
+An array of objects that represent the host and port this server will cluster to. Each object must have two properties: `port` and `host`. Multiple entries can be added to create network resiliency in the event one server is unavailable. Routes can be added, updated, and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints.
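+
+For illustration, a `cluster_set_routes` request adding a hub route might look like the following sketch (the host and port mirror the YAML above):
+
+```json
+{
+  "operation": "cluster_set_routes",
+  "server": "hub",
+  "routes": [{ "host": "3.62.184.22", "port": 9932 }]
+}
+```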
+
+`host` - _Type_: string
+
+The host of the remote instance you are creating the connection with.
+
+`port` - _Type_: integer
+
+The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance.
+
+`clustering.hubServer.leafNodes`
+
+```yaml
+clustering:
+  hubServer:
+    leafNodes:
+      network:
+        port: 9931
+```
+
+`port` - _Type_: integer; _Default_: 9931
+
+The port the hub server uses to accept leaf server connections.
+
+`clustering.hubServer.network`
+
+```yaml
+clustering:
+  hubServer:
+    network:
+      port: 9930
+```
+
+`port` - _Type_: integer; _Default_: 9930
+
+Use this port to connect a client to the hub server, for example when using the NATS SDK to interact with the server.
+
+`clustering.leafServer`
+
+Manages streams; streams are ‘message stores’ that store table transactions.
+
+```yaml
+clustering:
+  leafServer:
+    network:
+      port: 9940
+      routes:
+        - host: 3.62.184.22
+          port: 9931
+        - host: node3.example.com
+          port: 9931
+    streams:
+      maxAge: 3600
+      maxBytes: 10000000
+      maxMsgs: 500
+      path: /user/hdb/clustering/leaf
+```
+
+`port` - _Type_: integer; _Default_: 9940
+
+Use this port to connect a client to the leaf server, for example when using the NATS SDK to interact with the server.
+
+`routes` - _Type_: array; _Default_: null
+
+An array of objects that represent the host and port the leaf node will directly connect with. Each object must have two properties: `port` and `host`. Unlike the hub server, the leaf server will establish connections to all listed hosts. Routes can be added, updated, and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints.
+
+`host` - _Type_: string
+
+The host of the remote instance you are creating the connection with.
+
+`port` - _Type_: integer
+
+The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance.
+
+`clustering.leafServer.streams`
+
+`maxAge` - _Type_: integer; _Default_: null
+
+The maximum age of any messages in the stream, expressed in seconds.
+
+`maxBytes` - _Type_: integer; _Default_: null
+
+The maximum size of the stream in bytes. The oldest messages are removed if the stream exceeds this size.
+
+`maxMsgs` - _Type_: integer; _Default_: null
+
+How many messages may be in a stream. The oldest messages are removed if the stream exceeds this number.
+
+`path` - _Type_: string; _Default_: `<ROOTPATH>/clustering/leaf`
+
+The directory where all the streams are kept.
+
+```yaml
+clustering:
+  leafServer:
+    streams:
+      maxConsumeMsgs: 100
+      maxIngestThreads: 2
+```
+
+`maxConsumeMsgs` - _Type_: integer; _Default_: 100
+
+The maximum number of messages a consumer can process in one go.
+
+`maxIngestThreads` - _Type_: integer; _Default_: 2
+
+The number of Harper threads that are delegated to ingesting messages.
+
+***
+
+`logLevel` - _Type_: string; _Default_: error
+
+Control the verbosity of clustering logs.
+
+```yaml
+clustering:
+  logLevel: error
+```
+
+The log levels form a hierarchy, in order: `trace`, `debug`, `info`, `warn`, and `error`. When the level is set to `trace`, logs will be created for all possible levels.
+Whereas if the level is set to `warn`, the only entries logged will be `warn` and `error`. The default value is `error`.
+
+`nodeName` - _Type_: string; _Default_: null
+
+The name of this node in your Harper cluster topology. This must be a value unique from the rest of the cluster node names.
+
+_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._
+
+```yaml
+clustering:
+  nodeName: great_node
+```
+
+`tls`
+
+Transport Layer Security default values are automatically generated on install.
+
+```yaml
+clustering:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+    insecure: true
+    verify: true
+```
+
+`certificate` - _Type_: string; _Default_: `<ROOTPATH>/keys/certificate.pem`
+
+Path to the certificate file.
+
+`certificateAuthority` - _Type_: string; _Default_: `<ROOTPATH>/keys/ca.pem`
+
+Path to the certificate authority file.
+
+`privateKey` - _Type_: string; _Default_: `<ROOTPATH>/keys/privateKey.pem`
+
+Path to the private key file.
+
+`insecure` - _Type_: boolean; _Default_: true
+
+When true, certificate verification is skipped. For use only with self-signed certs.
+
+`republishMessages` - _Type_: boolean; _Default_: false
+
+When true, all transactions that are received from other nodes are republished to this node's stream. When subscriptions are not fully connected between all nodes, this ensures that messages are routed to all nodes through intermediate nodes. This also ensures that all writes, whether local or remote, are written to the NATS transaction log. However, there is additional overhead with republishing, and setting this to false can provide better data replication performance. When false, you need to ensure all subscriptions are fully connected between every node and every other node, and be aware that the NATS transaction log will only consist of local writes.
+
+`verify` - _Type_: boolean; _Default_: true
+
+When true, the hub server will verify the client certificate using the CA certificate.
+
+***
+
+`user` - _Type_: string; _Default_: null
+
+The username given to the `cluster_user`. All instances in a cluster must use the same clustering user credentials (matching username and password).
+
+Inter-node authentication takes place via a special Harper user role type called `cluster_user`.
+
+The user can be created either through the API using an `add_user` request with the role set to `cluster_user`, or on install using the environment variables `CLUSTERING_USER=cluster_person` and `CLUSTERING_PASSWORD=pass123!`, or the CLI variables `harperdb --CLUSTERING_USER cluster_person --CLUSTERING_PASSWORD pass123!`
+
+```yaml
+clustering:
+  user: cluster_person
+```
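+
+As a sketch, creating that user through the API could look like the following `add_user` request (using the illustrative credentials from above):
+
+```json
+{
+  "operation": "add_user",
+  "role": "cluster_user",
+  "username": "cluster_person",
+  "password": "pass123!",
+  "active": true
+}
+```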
+
+***
+
+### `localStudio`
+
+The `localStudio` section configures the local Harper Studio, a GUI for Harper hosted on the server. A hosted version of the Harper Studio, with licensing and provisioning options, is available at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or Harper Studio is made directly from your browser to the instance.
+
+`enabled` - _Type_: boolean; _Default_: false
+
+Enables the local studio.
+
+```yaml
+localStudio:
+  enabled: false
+```
+
+***
+
+### `logging`
+
+The `logging` section configures Harper logging across all Harper functionality. This includes standard text logging of application and database events as well as structured data logs of record changes. Application and database events are logged in text format to the `~/hdb/log/hdb.log` file (or the location specified by `logging.root`).
+
+In addition, structured logging of data changes is also available:
+
+`auditLog` - _Type_: boolean; _Default_: false
+
+Enables table transaction logging.
+
+```yaml
+logging:
+  auditLog: false
+```
+
+To access the audit logs, use the API operation `read_audit_log`. It will provide a history of the data, including original records and changes made, in a specified table.
+
+```json
+{
+  "operation": "read_audit_log",
+  "schema": "dev",
+  "table": "dog"
+}
+```
+
+`file` - _Type_: boolean; _Default_: true
+
+Defines whether to log to a file.
+
+```yaml
+logging:
+  file: true
+```
+
+`auditRetention` - _Type_: string|number; _Default_: 3d
+
+This specifies how long audit logs should be retained.
+
+`level` - _Type_: string; _Default_: error
+
+Control the verbosity of text event logs.
+
+```yaml
+logging:
+  level: warn
+```
+
+The log levels form a hierarchy, in order: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. When the level is set to `trace`, logs will be created for all possible levels. Whereas if the level is set to `fatal`, the only entries logged will be `fatal` and `notify`. The default value is `error`.
+
+`console` - _Type_: boolean; _Default_: true
+
+Controls whether console.log and other console.\* calls (as well as other JS components that write to `process.stdout` and `process.stderr`) are logged to the log file. By default, these are logged to the log file, but this can be disabled.
+
+```yaml
+logging:
+  console: true
+```
+
+`root` - _Type_: string; _Default_: `<ROOTPATH>/log`
+
+The path where the log files will be written.
+
+```yaml
+logging:
+  root: ~/hdb/log
+```
+
+`rotation`
+
+Rotation provides the ability for a user to systematically rotate and archive the `hdb.log` file. To enable rotation, `interval` and/or `maxSize` must be set.
+
+_**Note:**_ `interval` and `maxSize` are approximates only. It is possible that the log file will slightly exceed these values before it is rotated.
+
+```yaml
+logging:
+  rotation:
+    enabled: true
+    compress: false
+    interval: 1D
+    maxSize: 100K
+    path: /user/hdb/log
+```
+
+`enabled` - _Type_: boolean; _Default_: false
+
+Enables log rotation.
+
+`compress` - _Type_: boolean; _Default_: false
+
+Enables compression via gzip when logs are rotated.
+
+`interval` - _Type_: string; _Default_: null
+
+The time that should elapse between rotations. Acceptable units are D(ays), H(ours) or M(inutes).
+
+`maxSize` - _Type_: string; _Default_: null
+
+The maximum size the log file can reach before it is rotated. Must use units M(egabyte), G(igabyte), or K(ilobyte).
+
+`path` - _Type_: string; _Default_: `<ROOTPATH>/log`
+
+Where to store the rotated log file. The file naming convention is `HDB-YYYY-MM-DDT-HH-MM-SSSZ.log`.
+
+`stdStreams` - _Type_: boolean; _Default_: false
+
+Log Harper logs to the standard output and error streams.
+
+```yaml
+logging:
+  stdStreams: false
+```
+
+`auditAuthEvents`
+
+`logFailed` - _Type_: boolean; _Default_: false
+
+Log all failed authentication events.
+
+_Example:_ `[error] [auth-event]: {"username":"admin","status":"failure","type":"authentication","originating_ip":"127.0.0.1","request_method":"POST","path":"/","auth_strategy":"Basic"}`
+
+`logSuccessful` - _Type_: boolean; _Default_: false
+
+Log all successful authentication events.
+
+_Example:_ `[notify] [auth-event]: {"username":"admin","status":"success","type":"authentication","originating_ip":"127.0.0.1","request_method":"POST","path":"/","auth_strategy":"Basic"}`
+
+```yaml
+logging:
+  auditAuthEvents:
+    logFailed: false
+    logSuccessful: false
+```
+
+***
+
+### `authentication`
+
+The `authentication` section defines the configuration for the default authentication mechanism in Harper.
+
+```yaml
+authentication:
+  authorizeLocal: true
+  cacheTTL: 30000
+  enableSessions: true
+  operationTokenTimeout: 1d
+  refreshTokenTimeout: 30d
+```
+
+`authorizeLocal` - _Type_: boolean; _Default_: true
+
+This will automatically authorize any requests from the loopback IP address as the superuser. This should be disabled for any Harper servers that may be accessed by untrusted users from the same instance. For example, this should be disabled if you are using a local proxy, or for general server hardening.
+
+`cacheTTL` - _Type_: number; _Default_: 30000
+
+This defines the length of time (in milliseconds) that an authentication (a particular Authorization header or token) can be cached.
+
+`enableSessions` - _Type_: boolean; _Default_: true
+
+This will enable cookie-based sessions to maintain an authenticated session. This is generally the preferred mechanism for maintaining authentication in web browsers, as it allows cookies to hold an authentication token securely without giving JavaScript code access to tokens/credentials that may open up XSS vulnerabilities.
+
+`operationTokenTimeout` - _Type_: string; _Default_: 1d
+
+Defines the length of time an operation token will be valid until it expires. For example values, see https://github.com/vercel/ms.
+
+`refreshTokenTimeout` - _Type_: string; _Default_: 30d
+
+Defines the length of time a refresh token will be valid until it expires. For example values, see https://github.com/vercel/ms.
+
+### `operationsApi`
+
+The `operationsApi` section configures the Harper Operations API.\
+All the `operationsApi` configuration is optional. Any configuration that is not provided under this section will default to the `http` configuration section.
+
+`network`
+
+```yaml
+operationsApi:
+  network:
+    cors: true
+    corsAccessList:
+      - null
+    domainSocket: /user/hdb/operations-server
+    headersTimeout: 60000
+    keepAliveTimeout: 5000
+    port: 9925
+    securePort: null
+    timeout: 120000
+```
+
+`cors` - _Type_: boolean; _Default_: true
+
+Enable Cross Origin Resource Sharing, which allows requests across a domain.
+
+`corsAccessList` - _Type_: array; _Default_: null
+
+An array of allowable domains with CORS.
+
+`domainSocket` - _Type_: string; _Default_: `<user home>/hdb/operations-server`
+
+The path to the Unix domain socket used to provide the Operations API through the CLI.
+
+`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute)
+
+Limit the amount of time the parser will wait to receive the complete HTTP headers.
+
+`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds)
+
+Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response.
+
+`port` - _Type_: integer; _Default_: 9925
+
+The port the Harper operations API interface will listen on.
+
+`securePort` - _Type_: integer; _Default_: null
+
+The port the Harper operations API uses for HTTPS connections. This requires a valid certificate and key.
+
+`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes)
+
+The length of time in milliseconds after which a request will time out.
+
+`tls`
+
+This configures the Transport Layer Security for HTTPS support.
+
+```yaml
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+```
+
+`certificate` - _Type_: string; _Default_: `<ROOTPATH>/keys/certificate.pem`
+
+Path to the certificate file.
+
+`certificateAuthority` - _Type_: string; _Default_: `<ROOTPATH>/keys/ca.pem`
+
+Path to the certificate authority file.
+
+`privateKey` - _Type_: string; _Default_: `<ROOTPATH>/keys/privateKey.pem`
+
+Path to the private key file.
+
+***
+
+### `componentsRoot`
+
+`componentsRoot` - _Type_: string; _Default_: `<ROOTPATH>/components`
+
+The path to the folder containing the local component files.
+
+```yaml
+componentsRoot: ~/hdb/components
+```
+
+***
+
+### `rootPath`
+
+`rootPath` - _Type_: string; _Default_: home directory of the current user
+
+The Harper database and applications/API/interface are decoupled from each other. The `rootPath` directory specifies where the Harper application persists data, config, logs, and Custom Functions.
+
+```yaml
+rootPath: /Users/jonsnow/hdb
+```
+
+***
+
+### `storage`
+
+`writeAsync` - _Type_: boolean; _Default_: false
+
+The `writeAsync` option turns off disk flushing/syncing, allowing for faster write operation throughput. However, this does not provide storage integrity guarantees, and if a server crashes, it is possible that there may be data loss requiring a restore from another backup/another node.
+
+```yaml
+storage:
+  writeAsync: false
+```
+
+`caching` - _Type_: boolean; _Default_: true
+
+The `caching` option enables in-memory caching of records, providing faster access to frequently accessed objects. This can incur some extra overhead for situations where reads are extremely random and don't benefit from caching.
+
+```yaml
+storage:
+  caching: true
+```
+
+`compression` - _Type_: boolean; _Default_: true
+
+The `compression` option enables compression of records in the database. This can be helpful for very large records in reducing storage requirements and potentially allowing more data to be cached. This uses the very fast LZ4 compression algorithm, but it still incurs extra costs for compressing and decompressing.
+
+```yaml
+storage:
+  compression: false
+```
+
+`compression.dictionary` - _Type_: string; _Default_: null
+
+Path to a compression dictionary file.
+
+`compression.threshold` - _Type_: number; _Default_: Either `4036` or, if `storage.pageSize` is provided, `storage.pageSize - 60`
+
+Only entries that are larger than this value (in bytes) will be compressed.
+
+```yaml
+storage:
+  compression:
+    dictionary: /users/harperdb/dict.txt
+    threshold: 1000
+```
+
+`compactOnStart` - _Type_: boolean; _Default_: false
+
+When `true`, all non-system databases will be compacted when starting Harper; read more [here](../administration/compact).
+
+`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false
+
+Keep the backups made by compactOnStart.
+
+```yaml
+storage:
+  compactOnStart: true
+  compactOnStartKeepBackup: false
+```
+
+`maxTransactionQueueTime` - _Type_: time; _Default_: 45s
+
+The `maxTransactionQueueTime` specifies how long the write queue can get before write requests are rejected (with a 503).
+
+```yaml
+storage:
+  maxTransactionQueueTime: 2m
+```
+
+`noReadAhead` - _Type_: boolean; _Default_: false
+
+The `noReadAhead` option advises the operating system not to read ahead when reading from the database. This provides better memory utilization for databases with small records (less than one page), but can degrade performance in situations where large records are used or frequent range queries are used.
+
+```yaml
+storage:
+  noReadAhead: true
+```
+
+`prefetchWrites` - _Type_: boolean; _Default_: true
+
+The `prefetchWrites` option loads data prior to write transactions. This should be enabled for databases that are larger than memory (although it can be faster to disable this for smaller databases).
+
+```yaml
+storage:
+  prefetchWrites: true
+```
+
+`path` - _Type_: string; _Default_: `<ROOTPATH>/schema`
+
+The `path` configuration sets where all database files should reside.
+
+```yaml
+storage:
+  path: /users/harperdb/storage
+```
+
+_**Note:**_ This configuration applies to all database files, which includes system tables that are used internally by Harper. For this reason, if you wish to use a non-default `path` value, you must move any existing schemas into your `path` location. Existing schemas will likely include the system schema, which can be found at `<ROOTPATH>/schema/system`.
+
+`pageSize` - _Type_: number; _Default_: The default page size of the OS
+
+Defines the page size of the database.
+
+```yaml
+storage:
+  pageSize: 4096
+```
+
+***
+
+### `tls`
+
+This section defines the certificates, keys, and settings for Transport Layer Security (TLS) for HTTPS and TLS socket support. This is used for both the HTTP and MQTT protocols. The `tls` section can be a single object with the settings below, or it can be an array of objects, where each object is a separate TLS configuration. By using an array, the TLS configuration can be used to define multiple certificates for different domains/hosts (negotiated through SNI).
+
+```yaml
+tls:
+  certificate: ~/hdb/keys/certificate.pem
+  certificateAuthority: ~/hdb/keys/ca.pem
+  privateKey: ~/hdb/keys/privateKey.pem
+```
+
+`certificate` - _Type_: string; _Default_: `<ROOTPATH>/keys/certificate.pem`
+
+Path to the certificate file.
+
+`certificateAuthority` - _Type_: string; _Default_: `<ROOTPATH>/keys/ca.pem`
+
+Path to the certificate authority file.
+
+`privateKey` - _Type_: string; _Default_: `<ROOTPATH>/keys/privateKey.pem`
+
+Path to the private key file.
+
+`ciphers` - _Type_: string;
+
+Allows specific ciphers to be set.
+
+If you want to define multiple certificates that are applied based on the domain/host requested via SNI, you can define an array of TLS configurations. Each configuration can have the same properties as the root TLS configuration, but can (optionally) also have an additional `host` property to specify the domain/host that the certificate should be used for:
+
+```yaml
+tls:
+  - certificate: ~/hdb/keys/certificate1.pem
+    certificateAuthority: ~/hdb/keys/ca1.pem
+    privateKey: ~/hdb/keys/privateKey1.pem
+    host: example.com # the host is optional; if not provided, this certificate's common name will be used as the host name.
+  - certificate: ~/hdb/keys/certificate2.pem
+    certificateAuthority: ~/hdb/keys/ca2.pem
+    privateKey: ~/hdb/keys/privateKey2.pem
+```
+
+Note that a `tls` section can also be defined in the `operationsApi` section, which will override the root `tls` section for the operations API.
+
+***
+
+### `mqtt`
+
+The MQTT protocol can be configured in this section.
+
+```yaml
+mqtt:
+  network:
+    port: 1883
+    securePort: 8883
+    mtls: false
+    webSocket: true
+    requireAuthentication: true
+```
+
+`port` - _Type_: number; _Default_: 1883
+
+This is the port to use for listening for insecure MQTT connections.
+
+`securePort` - _Type_: number; _Default_: 8883
+
+This is the port to use for listening for secure MQTT connections. This will use the `tls` configuration for certificates.
+
+`webSocket` - _Type_: boolean; _Default_: true
+
+This enables access to MQTT through WebSockets. This will handle WebSocket connections on the http port (defaults to 9926) that have specified a (sub)protocol of `mqtt`.
+
+`requireAuthentication` - _Type_: boolean; _Default_: true
+
+This indicates whether authentication should be required for establishing an MQTT connection (whether through MQTT connection credentials or mTLS). Disabling this allows unauthenticated connections, which are then subject to authorization for publishing and subscribing (by default, tables/resources do not authorize such access, but that can be enabled at the resource level).
+
+`mtls` - _Type_: boolean | object; _Default_: false
+
+This can be configured to enable mTLS based authentication for incoming connections. If enabled with default options (by setting it to `true`), the client certificate will be checked against the certificate authority specified in the `tls` section. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default.
+
+You can also define specific mTLS options by specifying an object for `mtls` with the following (optional) properties:
+
+`user` - _Type_: string; _Default_: Common Name
+
+This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication.
+
+`required` - _Type_: boolean; _Default_: false
+
+This can be enabled to require client certificates (mTLS) for all incoming MQTT connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication.
+
+`certificateAuthority` - _Type_: string; _Default_: Path from `tls.certificateAuthority`
+
+This can define a specific path to use for the certificate authority. By default, certificate authorization checks against the CA specified at `tls.certificateAuthority`, but if you need a specific/distinct CA for MQTT, you can set this.
+
+For example, you could specify that mTLS is required and will authenticate as "user-name":
+
+```yaml
+mqtt:
+  network:
+    mtls:
+      user: user-name
+      required: true
+```
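+
+Similarly, a sketch of requiring mTLS while pointing MQTT at its own certificate authority (the file path here is hypothetical):
+
+```yaml
+mqtt:
+  network:
+    mtls:
+      required: true
+      certificateAuthority: ~/hdb/keys/mqtt-ca.pem
+```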
+
+***
+
+### `databases`
+
+The `databases` section is an optional configuration that can be used to define where database files should reside, down to the table level. This configuration should be set before the database and table have been created. The configuration will not create the directories in the path; that must be done by the user.
+
+To define where a database and all its tables should reside, use the name of your database and the `path` parameter.
+
+```yaml
+databases:
+  nameOfDatabase:
+    path: /path/to/database
+```
+
+To define where specific tables within a database should reside, use the name of your database, the `tables` parameter, the name of your table, and the `path` parameter.
+
+```yaml
+databases:
+  nameOfDatabase:
+    tables:
+      nameOfTable:
+        path: /path/to/table
+```
+
+This same pattern can be used to define where the audit log database files should reside. To do this, use the `auditPath` parameter.
+
+```yaml
+databases:
+  nameOfDatabase:
+    auditPath: /path/to/database
+```
+
+**Setting the database section through the command line, environment variables or API**
+
+When using command line variables, environment variables, or the API to configure the `databases` section, a slightly different convention from the regular one should be used. To add one or more configurations, use a JSON object array.
+
+Using command line variables:
+
+```bash
+--DATABASES [{\"nameOfSchema\":{\"tables\":{\"nameOfTable\":{\"path\":\"\/path\/to\/table\"}}}}]
+```
+
+Using environment variables:
+
+```bash
+DATABASES=[{"nameOfSchema":{"tables":{"nameOfTable":{"path":"/path/to/table"}}}}]
+```
+
+Using the API:
+
+```json
+{
+  "operation": "set_configuration",
+  "databases": [{
+    "nameOfDatabase": {
+      "tables": {
+        "nameOfTable": {
+          "path": "/path/to/table"
+        }
+      }
+    }
+  }]
+}
+```
+
+***
+
+### Components
+
+`<componentName>` - _Type_: string
+
+The name of the component. This will be used to name the folder where the component is installed and must be unique.
+
+`package` - _Type_: string
+
+A reference to your [component](../developers/components/managing#adding-components-to-root) package. This could be a remote git repo, a local folder/file, or an NPM package. Harper will add this package to a package.json file and call `npm install` on it, so any reference that works with that paradigm will work here.
+
+Read more about npm install [here](https://docs.npmjs.com/cli/v8/commands/npm-install).
+
+`port` - _Type_: number; _Default_: whatever is set in `http.port`
+
+The port that your component should listen on. If no port is provided, it will default to `http.port`.
+
+```yaml
+<componentName>:
+  package: 'HarperDB-Add-Ons/package-name'
+  port: 4321
+```
diff --git a/site/versioned_docs/version-4.4/deployments/harper-cli.md b/site/versioned_docs/version-4.4/deployments/harper-cli.md
new file mode 100644
index 00000000..91240516
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/harper-cli.md
@@ -0,0 +1,194 @@
+---
+title: Harper CLI
+---
+
+# Harper CLI
+
+## Harper CLI
+
+The Harper command line interface (CLI) is used to administer [self-installed Harper instances](./install-harper/).
+
+### Installing Harper
+
+To install Harper with CLI prompts, run the following command:
+
+```bash
+harperdb install
+```
+
+Alternatively, Harper installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](./configuration#using-the-configuration-file-and-naming-conventions). Note, when used in conjunction, command line arguments will override environment variables.
+
+**Environment Variables**
+
+```bash
+#minimum required parameters for no additional CLI prompts
+export TC_AGREEMENT=yes
+export HDB_ADMIN_USERNAME=HDB_ADMIN
+export HDB_ADMIN_PASSWORD=password
+export ROOTPATH=/tmp/hdb/
+export OPERATIONSAPI_NETWORK_PORT=9925
+harperdb install
+```
+
+**Command Line Arguments**
+
+```bash
+#minimum required parameters for no additional CLI prompts
+harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925
+```
+
+***
+
+### Starting Harper
+
+To start Harper after it is installed, run the following command:
+
+```bash
+harperdb start
+```
+
+***
+
+### Stopping Harper
+
+To stop Harper once it is running, run the following command:
+
+```bash
+harperdb stop
+```
+
+***
+
+### Restarting Harper
+
+To restart Harper once it is running, run the following command:
+
+```bash
+harperdb restart
+```
+
+***
+
+### Getting the Harper Version
+
+To check the version of Harper that is installed, run the following command:
+
+```bash
+harperdb version
+```
+
+***
+
+### Renew self-signed certificates
+
+To renew the Harper generated self-signed certificates, run:
+
+```bash
+harperdb renew-certs
+```
+
+***
+
+### Copy a database with compaction
+
+To copy a Harper database with compaction (to eliminate free space and fragmentation), use:
+
+```bash
+harperdb copy-db
+```
+
+For example, to copy the default database:
+
+```bash
+harperdb copy-db data /home/user/hdb/database/copy.mdb
+```
+
+***
+
+### Get all available CLI commands
+
+To display all available Harper CLI commands along with a brief description, run:
+
+```bash
+harperdb help
+```
+
+***
+
+### Get the status of Harper and clustering
+
+To display the status of the Harper process, the clustering hub and leaf processes, the clustering network, and replication statuses, run:
+
+```bash
+harperdb status
+```
+
+***
+
+### Backups
+
+Harper uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that Harper maintains database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a Harper database. Database files are stored in the hdb/database directory. As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the database directory to restore a previous backup (with Harper shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot; this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use.
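+
+For example, a minimal cold-backup sketch along those lines (paths assume a default `~/hdb` root; adjust for your install):
+
+```bash
+# stop Harper so the database files are no longer in use
+harperdb stop
+
+# copy the database files to a dated backup location
+cp -r ~/hdb/database /backups/hdb-database-$(date +%F)
+
+# start Harper again
+harperdb start
+```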
+
+***
+
+## Operations API through the CLI
+
+Some of the API operations are available through the CLI; this includes most operations that do not require nested parameters. To call an operation, use the following convention: `harperdb <operation> <parameter>=<value>`. By default, the result will be formatted as YAML; if you would like the result in JSON, pass `json=true`.
+
+Some examples are:
+
+```bash
+$ harperdb describe_table database=dev table=dog
+
+schema: dev
+name: dog
+hash_attribute: id
+audit: true
+schema_defined: false
+attributes:
+  - attribute: id
+    is_primary_key: true
+  - attribute: name
+    indexed: true
+clustering_stream_name: 3307bb542e0081253klnfd3f1cf551b
+record_count: 10
+last_updated_record: 1724483231970.9949
+```
+
+`harperdb set_configuration logging_level=error`
+
+`harperdb deploy_component project=my-cool-app package=https://github.com/HarperDB/application-template`
+
+`harperdb get_components`
+
+`harperdb search_by_id database=dev table=dog ids='["1"]' get_attributes='["*"]' json=true`
+
+`harperdb search_by_value table=dog search_attribute=name search_value=harper get_attributes='["id", "name"]'`
+
+`harperdb sql sql='select * from dev.dog where id="1"'`
+
+### Remote Operations
+
+The CLI can also be used to run operations on remote Harper instances. To do this, pass the `target` parameter with the HTTP address of the remote instance. You will generally also need to provide credentials, specifying the `username` and `password` parameters, or you can set the environment variables `CLI_TARGET_USERNAME` and `CLI_TARGET_PASSWORD`, for example:
+
+```bash
+export CLI_TARGET_USERNAME=HDB_ADMIN
+export CLI_TARGET_PASSWORD=password
+harperdb describe_database database=dev target=https://server.com:9925
+```
+
+The same set of API operations is available for remote operations as well.
+
+#### Remote Component Deployment
+
+When using remote operations, you can deploy a local component to the remote instance. If you omit the `package` parameter, you can deploy the current directory. This will package the current directory and send it to the target server (`deploy` is also allowed as an alias for `deploy_component`):
+
+```bash
+harperdb deploy target=https://server.com:9925
+```
+
+If you are interacting with a cluster, you may wish to include the `replicated=true` parameter to ensure that the deployment operation is replicated to all nodes in the cluster. You will also need to restart afterwards to apply the changes (here shown with the replicated parameter):
+
+```bash
+harperdb restart target=https://server.com:9925 replicated=true
+```
diff --git a/site/versioned_docs/version-4.4/deployments/harper-cloud/alarms.md b/site/versioned_docs/version-4.4/deployments/harper-cloud/alarms.md
new file mode 100644
index 00000000..72b4e7a7
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/harper-cloud/alarms.md
@@ -0,0 +1,20 @@
+---
+title: Alarms
+---
+
+# Alarms
+
+Harper Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered, organization owners will immediately receive an email alert, and the alert will be available on the [Instance Configuration](../../administration/harper-studio/instance-configuration) page. The table below describes each alert and its evaluation metrics.
+
+### Heading Definitions
+
+* **Alarm**: Title of the alarm.
+* **Threshold**: Definition of the alarm threshold.
+* **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over.
+* **Proposed Remedy**: Recommended solution to avoid the alert in the future.
+
+| Alarm   | Threshold  | Intervals | Proposed Remedy                                                                                                              |
+| ------- | ---------- | --------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| Storage | > 90% Disk | 1 x 5min  | [Increase storage volume](../../administration/harper-studio/instance-configuration#update-instance-storage)                  |
+| CPU     | > 90% Avg  | 2 x 5min  | [Increase instance size for additional CPUs](../../administration/harper-studio/instance-configuration#update-instance-ram)   |
+| Memory  | > 90% RAM  | 2 x 5min  | [Increase instance size](../../administration/harper-studio/instance-configuration#update-instance-ram)                       |
diff --git a/site/versioned_docs/version-4.4/deployments/harper-cloud/index.md b/site/versioned_docs/version-4.4/deployments/harper-cloud/index.md
new file mode 100644
index 00000000..fbf2d81e
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/harper-cloud/index.md
@@ -0,0 +1,9 @@
+---
+title: Harper Cloud
+---
+
+# Harper Cloud
+
+[Harper Cloud](https://studio.harperdb.io/) is the easiest way to test drive Harper: it’s Harper-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. Harper Cloud is currently powered by AWS, with additional cloud providers on our roadmap for the future.
+
+You can create a new Harper Cloud instance in the Harper Studio.
diff --git a/site/versioned_docs/version-4.4/deployments/harper-cloud/instance-size-hardware-specs.md b/site/versioned_docs/version-4.4/deployments/harper-cloud/instance-size-hardware-specs.md
new file mode 100644
index 00000000..72979d8d
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/harper-cloud/instance-size-hardware-specs.md
@@ -0,0 +1,23 @@
+---
+title: Instance Size Hardware Specs
+---
+
+# Instance Size Hardware Specs
+
+While Harper Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.
+
+| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor                              |
+| --------------------- | --------- | ------- | -------------- | -------------------------------------- |
+| t3.micro              | 1         | 2       | Up to 5        | 2.5 GHz Intel Xeon Platinum 8000       |
+| t3.small              | 2         | 2       | Up to 5        | 2.5 GHz Intel Xeon Platinum 8000       |
+| t3.medium             | 4         | 2       | Up to 5        | 2.5 GHz Intel Xeon Platinum 8000       |
+| m5.large              | 8         | 2       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.xlarge             | 16        | 4       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.2xlarge            | 32        | 8       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.4xlarge            | 64        | 16      | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.8xlarge            | 128       | 32      | 10             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.12xlarge           | 192       | 48      | 10             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.16xlarge           | 256       | 64      | 20             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.24xlarge           | 384       | 96      | 25             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+
+\*Specifications are subject to change. For the most up to date information, please refer to AWS documentation: [https://aws.amazon.com/ec2/instance-types/](https://aws.amazon.com/ec2/instance-types/).
diff --git a/site/versioned_docs/version-4.4/deployments/harper-cloud/iops-impact.md b/site/versioned_docs/version-4.4/deployments/harper-cloud/iops-impact.md
new file mode 100644
index 00000000..f316fc30
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/harper-cloud/iops-impact.md
@@ -0,0 +1,42 @@
+---
+title: IOPS Impact on Performance
+---
+
+# IOPS Impact on Performance
+
+Harper, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of a server, virtual machine, or container running Harper. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that Harper performs as expected.
+
+## IOPS Overview
+
+The primary measure of storage performance is the number of input/output operations per second (IOPS) that a storage device can perform. Different storage devices can have dramatically different performance profiles. A hard drive (HDD) might only perform a hundred or so IOPS, while a solid state drive (SSD) might be able to perform tens or hundreds of thousands of IOPS.
+
+Cloud providers like AWS, which powers Harper Cloud, don’t typically attach individual disks to a virtual machine or container. Instead, they combine large numbers of storage drives to create very high performance storage servers. Chunks (volumes) of that storage are then carved out and presented to many different virtual machines and containers. Due to the shared nature of this type of storage, the cloud provider places configurable limits on the number of IOPS that a volume can perform. The same way that cloud providers charge more for larger capacity volumes, they also charge more for volumes with more IOPS.
+
+## Harper Cloud Storage
+
+Harper Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.
+
+AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS; as a result, all Harper Cloud storage options offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future.
+
+You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html.
+
+## Estimating IOPS for a Harper Instance
+
+The number of IOPS required for a particular workload is influenced by many factors. Testing your particular application is the best way to determine the number of IOPS required. A reliable method is to estimate about two IOPS for every index, including the primary key itself. So if a table has two indices besides the primary key, estimate that an insert or update will require about six IOPS. Note that this can often be closer to one IOPS per index under load, due to internal batching of writes, and sometimes even better when doing sequential inserts. Again, it is best to verify this by testing with application-specific data and write patterns.
+
+For assistance in estimating IOPS requirements, feel free to contact Harper Support or join our Community Slack Channel.
+
+## Example Use Case IOPS Requirements
+
+* **Sensor Data Collection**
+
+  In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level.
+  For example, if you are collecting 100 records per second, you would expect to need roughly 3,000 IOPS just to handle the data inserts.
+* **Data Analytics/BI Server**
+
+  Providing a server for analytics purposes typically requires a larger machine. These cases typically involve large-scale SQL joins and aggregations, which put a large strain on reads. Harper utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs high IOPS to meet performance demands.
+* **Web Services**
+
+  Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any Harper operation that requires a data scan will be IOPS intensive, but if these are infrequent then the EBS boost will suffice. Queries using equals operations in either SQL or NoSQL do not require a scan, due to Harper’s native indexing.
+* **High Performance Database**
+
+  Ultimately, if performance is your top priority, Harper should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements.
diff --git a/site/versioned_docs/version-4.4/deployments/harper-cloud/verizon-5g-wavelength-instances.md b/site/versioned_docs/version-4.4/deployments/harper-cloud/verizon-5g-wavelength-instances.md
new file mode 100644
index 00000000..1589acc3
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/harper-cloud/verizon-5g-wavelength-instances.md
@@ -0,0 +1,31 @@
+---
+title: Verizon 5G Wavelength
+---
+
+# Verizon 5G Wavelength
+
+These instances are only accessible from the Verizon network. When accessing your Harper instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, and Verizon mobile devices.
+
+Harper on Verizon 5G Wavelength brings Harper closer to the end user, exclusively on the Verizon network, resulting in as little as single-digit millisecond response times from Harper to the client.
+
+Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/).
+
+## Harper 5G Wavelength Instance Specs
+
+While Harper 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.
+
+| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor                                   |
+| --------------------- | --------- | ------- | -------------- | ------------------------------------------- |
+| t3.medium             | 4         | 2       | Up to 5        | Up to 3.1 GHz Intel Xeon Platinum Processor |
+| t3.xlarge             | 16        | 4       | Up to 5        | Up to 3.1 GHz Intel Xeon Platinum Processor |
+| r5.2xlarge            | 64        | 8       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum Processor |
+
+\*Specifications are subject to change. For the most up to date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/).
+
+## Harper 5G Wavelength Storage
+
+Harper 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp2) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.
+
+AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS a volume can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time.
+
+Smaller gp2 volumes are perfect for trying out the functionality of Harper, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger Harper volume. Learn more about the [impact of IOPS on performance here](./iops-impact).
+
+You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#ebsvolumetypes_gp2).
diff --git a/site/versioned_docs/version-4.4/deployments/install-harper/index.md b/site/versioned_docs/version-4.4/deployments/install-harper/index.md
new file mode 100644
index 00000000..99335044
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/install-harper/index.md
@@ -0,0 +1,61 @@
+---
+title: Install Harper
+---
+
+# Install Harper
+
+## Install Harper
+
+This documentation contains information for installing Harper locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with Harper Cloud](https://studio.harperdb.io/sign-up). Harper is a cross-platform database; we recommend Linux for production use, but Harper can run on Windows and Mac as well for development purposes. Installation is usually very simple and takes just a few steps, but there are a few different options documented here.
+
+Harper runs on Node.js, so if you do not have it installed, you need to do that first (if you already have it installed, you can skip to installing Harper itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally, NVM can be installed with the following command:
+
+```bash
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash
+```
+
+Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but we support all currently maintained Node versions (currently version 14 and newer; make sure to always use the latest minor/patch release for your major version):
+
+```bash
+nvm install --lts
+```
+
+#### Install and Start Harper
+
+Then you can install Harper with NPM and start it:
+
+```bash
+npm install -g harperdb
+harperdb
+```
+
+Harper will automatically start after installation. Harper's installation can be configured with numerous options via CLI arguments; for more information, visit the [Harper Command Line Interface](../harper-cli) guide.
+
+If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system for use as a database server in our Linux installation guide](./linux).
+
+## With Docker
+
+If you would like to run Harper in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server.
+
+Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a Harper container.
+
+## Offline Install
+
+If you need to install Harper on a device that doesn't have an Internet connection, you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and NPM):
+
+[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html)
+
+Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it:
+
+```bash
+npm install -g harperdb-X.X.X.tgz
+harperdb install
+```
+
+## Installation on Less Common Platforms
+
+Harper comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including:
+
+* [Go](https://go.dev/dl/): version 1.19.1
+* GCC
+* Make
+* Python v3.7, v3.8, v3.9, or v3.10
diff --git a/site/versioned_docs/version-4.4/deployments/install-harper/linux.md b/site/versioned_docs/version-4.4/deployments/install-harper/linux.md
new file mode 100644
index 00000000..cece27b9
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/install-harper/linux.md
@@ -0,0 +1,225 @@
+---
+title: On Linux
+---
+
+# On Linux
+
+If you wish to install locally or already have a configured server, see the basic [Installation Guide](./).
+
+The following is a recommended way to configure Linux and install Harper. These instructions should work reasonably well for any public cloud or on-premises Linux instance.
+
+***
+
+These instructions assume that the following has already been completed:
+
+1. Linux is installed
+1. Basic networking is configured
+1. A non-root user account dedicated to Harper with sudo privileges exists
+1. An additional volume for storing Harper files is attached to the Linux instance
+1. Traffic to ports 9925 (Harper Operations API), 9926 (Harper Application Interface), and 9932 (Harper Clustering) is permitted
+
+While you will need to access Harper through port 9925 for administration through the operations API, and port 9932 for clustering, for a higher level of security you may want to consider keeping both of these ports restricted to a VPN or VPC, and only exposing the application interface (9926 by default) to the public Internet.
+
+For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account.
+
+***
+
+### (Optional) LVM Configuration
+
+Logical Volume Manager (LVM) can be used to stripe multiple disks together to form a single logical volume. If striping disks together is not a requirement, skip these steps.
+
+Find the disk that already has a partition
+
+```bash
+used_disk=$(lsblk -P -I 259 | grep "nvme.n1.*part" | grep -o "nvme.n1")
+```
+
+Create an array of the free disks
+
+```bash
+declare -a free_disks
+mapfile -t free_disks < <(lsblk -P -I 259 | grep "nvme.n1.*disk" | grep -o "nvme.n1" | grep -v "$used_disk")
+```
+
+Get the quantity of free disks
+
+```bash
+free_disks_qty=${#free_disks[@]}
+```
+
+Construct the pvcreate command
+
+```bash
+cmd_string=""
+for i in "${free_disks[@]}"
+do
+cmd_string="$cmd_string /dev/$i"
+done
+```
+
+Initialize the disks for use by LVM
+
+```bash
+pvcreate_cmd="pvcreate $cmd_string"
+sudo $pvcreate_cmd
+```
+
+Create the volume group
+
+```bash
+vgcreate_cmd="vgcreate hdb_vg $cmd_string"
+sudo $vgcreate_cmd
+```
+
+Create the logical volume
+
+```bash
+sudo lvcreate -n hdb_lv -i $free_disks_qty -l 100%FREE hdb_vg
+```
+
+### Configure Data Volume
+
+Run `lsblk` and note the device name of the additional volume
+
+```bash
+lsblk
+```
+
+Create an ext4 filesystem on the volume (the commands below assume the device name is nvme1n1; if you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb\_vg/hdb\_lv)
+
+```bash
+sudo mkfs.ext4 -L hdb_data /dev/nvme1n1
+```
+
+Mount the file system and set the correct permissions for the directory
+
+```bash
+mkdir /home/ubuntu/hdb
+sudo mount -t ext4 /dev/nvme1n1 /home/ubuntu/hdb
+sudo chown -R ubuntu:ubuntu /home/ubuntu/hdb
+sudo chmod 775 /home/ubuntu/hdb
+```
+
+Create an fstab entry to mount the filesystem on boot
+
+```bash
+echo "LABEL=hdb_data /home/ubuntu/hdb ext4 defaults,noatime 0 1" | sudo tee -a /etc/fstab
+```
+
+### Configure Linux and Install Prerequisites
+
+If a swap file or partition does not already exist, create and enable a 2GB swap file
+
+```bash
+sudo dd if=/dev/zero of=/swapfile bs=128M count=16
+sudo chmod 600 /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+echo "/swapfile swap swap defaults 0 0" | sudo tee -a /etc/fstab
+```
+
+Increase the open file limits for the ubuntu user
+
+```bash
+echo "ubuntu soft nofile 500000" | sudo tee -a /etc/security/limits.conf
+echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf
+```
+
+Install Node Version Manager (nvm)
+
+```bash
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash
+```
+
+Load nvm (or log out and then log back in)
+
+```bash
+. ~/.nvm/nvm.sh
+```
+
+Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites))
+
+```bash
+nvm install <version>
+```
+
+### Install and Start Harper
+
+Here is an example of installing Harper with minimal configuration.
+
+```bash
+npm install -g harperdb
+harperdb start \
+  --TC_AGREEMENT "yes" \
+  --ROOTPATH "/home/ubuntu/hdb" \
+  --OPERATIONSAPI_NETWORK_PORT "9925" \
+  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
+  --HDB_ADMIN_PASSWORD "password"
+```
+
+Here is an example of installing Harper with commonly used additional configuration.
+
+```bash
+npm install -g harperdb
+harperdb start \
+  --TC_AGREEMENT "yes" \
+  --ROOTPATH "/home/ubuntu/hdb" \
+  --OPERATIONSAPI_NETWORK_PORT "9925" \
+  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
+  --HDB_ADMIN_PASSWORD "password" \
+  --HTTP_SECUREPORT "9926" \
+  --CLUSTERING_ENABLED "true" \
+  --CLUSTERING_USER "cluster_user" \
+  --CLUSTERING_PASSWORD "password" \
+  --CLUSTERING_NODENAME "hdb1"
+```
+
+You can also use a custom configuration file to set values on install. Use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your [custom configuration file](../configuration):
+
+```bash
+npm install -g harperdb
+harperdb start \
+  --TC_AGREEMENT "yes" \
+  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
+  --HDB_ADMIN_PASSWORD "password" \
+  --HDB_CONFIG "/path/to/your/custom/harperdb-config.yaml"
+```
+
+#### Start Harper on Boot
+
+Harper will automatically start after installation. If you wish Harper to start when the OS boots, you have two options:
+
+You can set up a crontab:
+
+```bash
+(crontab -l 2>/dev/null; echo "@reboot PATH=\"/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH\" && harperdb start") | crontab -
+```
+
+Or you can create a systemd script at `/etc/systemd/system/harperdb.service`, pasting the following contents into the file:
+
+```
+[Unit]
+Description=Harper
+
+[Service]
+Type=simple
+Restart=always
+User=ubuntu
+Group=ubuntu
+WorkingDirectory=/home/ubuntu
+ExecStart=/bin/bash -c 'PATH="/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH"; harperdb'
+
+[Install]
+WantedBy=multi-user.target
+```
+
+And then running the following:
+
+```
+systemctl daemon-reload
+systemctl enable harperdb
+```
+
+For more information, visit the [Harper Command Line Interface guide](../harper-cli) and the [Harper Configuration File guide](../configuration).
diff --git a/site/versioned_docs/version-4.4/deployments/upgrade-hdb-instance.md b/site/versioned_docs/version-4.4/deployments/upgrade-hdb-instance.md
new file mode 100644
index 00000000..f5f403e7
--- /dev/null
+++ b/site/versioned_docs/version-4.4/deployments/upgrade-hdb-instance.md
@@ -0,0 +1,139 @@
+---
+title: Upgrade a Harper Instance
+---
+
+# Upgrade a Harper Instance
+
+This document describes best practices for upgrading self-hosted Harper instances. Harper can be upgraded using a combination of npm and built-in Harper upgrade scripts. Whenever upgrading your Harper installation, it is recommended that you make a backup of your data first. Note: This document applies to self-hosted Harper instances only. All [Harper Cloud instances](./harper-cloud/) will be upgraded by the Harper Cloud team.
+
+## Upgrading
+
+Upgrading Harper is a two-step process. First, the latest version of Harper must be downloaded from npm; then the Harper upgrade scripts will be utilized to ensure the newest features are available on the system.
+
+1. Install the latest version of Harper using `npm install -g harperdb`.
+
+   Note: `-g` should only be used if you installed Harper globally (which is recommended).
+1. Run `harperdb` to initiate the upgrade process.
+
+   Harper will then prompt you for all appropriate inputs and then run the upgrade directives.
+
+## Node Version Manager (nvm)
+
+[Node Version Manager (nvm)](http://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found here: https://nvm.sh/.
+
+Harper supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command:
+
+```bash
+nvm install <version>
+```
+
+To switch to a version of Node, run:
+
+```bash
+nvm use <version>
+```
+
+To see the current running version of Node, run:
+
+```bash
+node --version
+```
+
+With a handful of different versions of Node.js installed, run nvm with the `ls` argument to list out all installed versions:
+
+```bash
+nvm ls
+```
+
+When upgrading Harper, we recommend also upgrading your Node version. Here we assume you're running on an older version of Node; the execution may look like this:
+
+Switch to the older version of Node that Harper is running on (if it is not the current version):
+
+```bash
+nvm use 14.19.0
+```
+
+Make sure Harper is not running:
+
+```bash
+harperdb stop
+```
+
+Uninstall Harper. Note: this step is not required, but it will clean up old artifacts of Harper. We recommend removing all other Harper installations to ensure the most recent version is always running.
+
+```bash
+npm uninstall -g harperdb
+```
+
+Switch to the newer version of Node:
+
+```bash
+nvm use <version>
+```
+
+Install Harper globally
+
+```bash
+npm install -g harperdb
+```
+
+Run the upgrade script
+
+```bash
+harperdb
+```
+
+Start Harper
+
+```bash
+harperdb start
+```
+
+***
+
+## Upgrading NATS to Plexus 4.4
+
+To upgrade from NATS clustering to Plexus replication, follow these manual steps. They are designed for a fully replicating cluster to ensure minimal disruption during the upgrade process.
+
+The core of this upgrade is the _bridge node_. This node will run both NATS and Plexus simultaneously, ensuring that transactions are relayed between the two systems during the transition. The bridge node is crucial in preventing any replication downtime, as it will handle transactions from NATS nodes to Plexus nodes and vice versa.
+
+### Enabling Plexus
+
+To enable Plexus on a node that is already running NATS, you will need to update [two values](./configuration) in the `harperdb-config.yaml` file:
+
+```yaml
+replication:
+  url: wss://my-cluster-node-1:9925
+  hostname: node-1
+```
+
+`replication.url` – This should be set to the URL of the current Harper instance.
+
+`replication.hostname` – Since we are upgrading from NATS, this value should match the `clustering.nodeName` of the current instance.
+
+### Upgrade Steps
+
+1. Set up the bridge node:
+   * Choose one node to be the bridge node.
+   * On this node, follow the "Enabling Plexus" steps from the previous section, but **do not disable NATS clustering on this instance.**
+   * Stop the instance and perform the upgrade.
+   * Start the instance. This node should now be running both Plexus and NATS.
+1. Upgrade a node:
+   * Choose a node that needs upgrading and enable Plexus by following the "Enabling Plexus" steps.
+   * Disable NATS by setting `clustering.enabled` to `false`.
+   * Stop the instance and upgrade it.
+   * Start the instance.
+   * Call [`add_node`](../developers/operations-api/clustering#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node.
+     _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering#add-node)_._
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "node-1",
+  "url": "wss://my-cluster-node-1:9925"
+}
+```
+
+1. Repeat Step 2 on all remaining nodes that need to be upgraded.
+1. Disable NATS on the bridge node by setting `clustering.enabled` to `false` and restart the instance.
+
+Your cluster upgrade should now be complete, with no NATS processes running on any of the nodes.
diff --git a/site/versioned_docs/version-4.4/developers/_category_.json b/site/versioned_docs/version-4.4/developers/_category_.json
new file mode 100644
index 00000000..9fe399bf
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Developers",
+  "position": 1,
+  "link": {
+    "type": "generated-index",
+    "title": "Developers Documentation",
+    "description": "Comprehensive guides and references for building applications with HarperDB",
+    "keywords": [
+      "developers"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/developers/applications/caching.md b/site/versioned_docs/version-4.4/developers/applications/caching.md
new file mode 100644
index 00000000..c857c2a3
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/caching.md
@@ -0,0 +1,288 @@
+---
+title: Caching
+---
+
+# Caching
+
+Harper has integrated support for caching data from external sources. With built-in caching capabilities and distributed, high-performance, low-latency responsiveness, Harper makes an ideal data caching server. Harper can store cached data in standard tables as queryable structured data, so data can easily be consumed in one format (for example, JSON or CSV) and provided to end users in different formats with different selected properties (for example, MessagePack with a subset of properties), or even with customized querying capabilities. Harper also manages and provides timestamps/tags for proper caching control, facilitating further downstream caching. With these combined capabilities, Harper is an extremely fast, interoperable, flexible, and customizable caching server.
+
+## Configuring Caching
+
+To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](./defining-schemas)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`:
+
+```graphql
+type MyCache @table(expiration: 3600) @export {
+  id: ID @primaryKey
+}
+```
+
+You may also note that we can define a time-to-live (TTL) expiration on the table, indicating when table records/entries should expire and be evicted from this table. This is generally necessary for "passive" caches, where there is no active notification of when entries expire. However, it is not needed if you provide a means of notifying the cache when data is invalidated and changed. The units for expiration, and other duration-based properties, are seconds.
+
+While you can provide a single expiration time, there are actually several expiration timings that are potentially relevant, and they can be independently configured.
+These settings are available as directive properties on the table configuration (like `expiration` above):
+
+* stale expiration: The point when a request for a record should trigger a request to origin (but might possibly return the current stale record, depending on policy).
+* must-revalidate expiration: The point when a request for a record must make a request to origin first and return the latest value from origin.
+* eviction expiration: The point when a record is actually removed from the caching table.
+
+You can provide a single expiration and it defines the behavior for all three. You can also provide three settings for expiration, through table directives:
+
+* expiration - The amount of time until a record goes stale.
+* eviction - The amount of time after expiration before a record can be evicted (defaults to zero).
+* scanInterval - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction).
+
+## Define External Data Source
+
+Next, you need to define the source for your cache. External data sources could be HTTP APIs, other databases, microservices, or any other source of data. This can be defined as a resource class in your application's `resources.js` module. You can extend the `Resource` class (which is available as a global variable in the Harper environment) as your base class. The first method to implement is a `get()` method to define how to retrieve the source data. For example, if we were caching an external HTTP API, we might define it as such:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	async get() {
+		return (await fetch(`http://some-api.com/${this.getId()}`)).json();
+	}
+}
+```
+
+Next, we define this external data resource as the "source" for the caching table we defined above:
+
+```javascript
+const { MyCache } = tables;
+MyCache.sourcedFrom(ThirdPartyAPI);
+```
+
+Now we have a fully configured and connected caching table. If you access data from `MyCache` (for example, through the REST API, like `/MyCache/some-id`), Harper will check to see if the requested entry is in the table and return it if it is available (and hasn't expired). If there is no entry, or it has expired (it is older than one hour in this case), it will go to the source, calling the `get()` method, which will then retrieve the requested entry. Once the entry is retrieved, it will be saved/cached in the caching table (for one hour, based on our expiration time).
+
+```mermaid
+flowchart TD
+    Client1(Client 1)-->Cache(Caching Table)
+    Client2(Client 2)-->Cache
+    Cache-->Resource(Data Source Connector)
+    Resource-->API(Remote Data Source API)
+```
+
+Harper handles waiting for an existing cache resolution to finish and uses its result. This prevents a "cache stampede" when entries expire, ensuring that multiple requests for a cache entry will all wait on a single request to the data source.
+
+Caching tables with an expiration are periodically pruned for expired entries. Because this is done periodically, there is usually some amount of time between when a record has expired and when the record is actually evicted (the cached data is removed). But when a record is checked for availability, the expiration time is used to determine if the record is fresh (and the cache entry can be used).
+
+### Eviction with Indexing
+
+Eviction is the removal of a locally cached copy of data, but it does not imply the deletion of the actual data from the canonical or origin data source.
+Because evicted records still exist (just not in the local cache), if a caching table uses expiration (and eviction) and has indexing on certain attributes, the data is not removed from the indexes. The indexes that reference the evicted record are preserved, along with the attribute data necessary to maintain these indexes. Therefore, eviction means the removal of non-indexed data (in this case, evictions are stored as "partial" records). Eviction only removes the data that can be safely removed from a cache without affecting the integrity or behavior of the indexes. If a search query is performed that matches an evicted record, the record will be requested on demand to fulfill the search query.
+
+### Specifying a Timestamp
+
+In the example above, we simply retrieved data to fulfill a cache request. We may want to supply the timestamp of the record we are fulfilling as well. This can be set on the context for the request:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	async get() {
+		let response = await fetch(`http://some-api.com/${this.getId()}`);
+		this.getContext().lastModified = response.headers.get('Last-Modified');
+		return response.json();
+	}
+}
+```
+
+#### Specifying an Expiration
+
+In addition, we can also specify when a cached record "expires". When a cached record expires, this means that a request for that record will trigger a request to the data source again. This does not necessarily mean that the cached record has been evicted (removed), although expired records will be periodically evicted. If the cached record still exists, the data source can revalidate it and return it. For example:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	async get() {
+		const context = this.getContext();
+		let headers = new Headers();
+		if (context.replacingVersion) // this is the existing cached record
+			headers.set('If-Modified-Since', new Date(context.replacingVersion).toUTCString());
+		let response = await fetch(`http://some-api.com/${this.getId()}`, { headers });
+		let cacheInfo = response.headers.get('Cache-Control');
+		let maxAge = cacheInfo?.match(/max-age=(\d+)/)?.[1];
+		if (maxAge) // we can set a specific expiration time by setting context.expiresAt
+			context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time
+		// we can just revalidate and return the record if the origin has confirmed that it has the same version:
+		if (response.status === 304) return context.replacingRecord;
+		...
+```
+
+## Active Caching and Invalidation
+
+The cache we have created above is a "passive" cache; it only pulls data from the data source as needed, and has no knowledge of if and when data from the data source has actually changed, so it must rely on timer-based expiration to periodically retrieve possibly updated data. This means that the cache may hold stale data for a while (if the underlying data has changed but the cached data hasn't expired), and the cache may refresh more often than necessary if the data source data hasn't changed. Consequently, it can be significantly more effective to implement an "active" cache, in which the data source is monitored and notifies the cache when any data changes. This ensures that when data changes, the cache can immediately load the updated data, and unchanged data can remain cached much longer (or indefinitely).
+
+### Invalidate
+
+One way to provide more active caching is to specifically invalidate individual records.
+Invalidation is useful when you know the source data has changed and the cache needs to re-retrieve data from the source the next time that record is accessed. This can be done by executing the `invalidate()` method on a resource. For example, you could extend a table (in your resources.js) and provide a custom POST handler that does invalidation:
+
+```javascript
+const { MyTable } = tables;
+export class MyTableEndpoint extends MyTable {
+	async post(data) {
+		if (data.invalidate) // use this flag as a marker
+			this.invalidate();
+	}
+}
+```
+
+(Note that if you are now exporting this endpoint through resources.js, you don't necessarily need to directly export the table separately in your schema.graphql.)
+
+### Subscriptions
+
+We can provide more control of an active cache with subscriptions. If there is a way to receive notifications from the external data source of data changes, we can implement this data source as an "active" data source for our cache by implementing a `subscribe` method. A `subscribe` method should return an asynchronous iterable that iterates and returns events indicating the updates. One straightforward way of creating an asynchronous iterable is by defining the `subscribe` method as an asynchronous generator. If we had an endpoint that we could poll for changes every second, we could implement this like:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	async *subscribe() {
+		while (true) { // every second, retrieve more data
+			// get the next data change event from the source
+			let update = await (await fetch(`http://some-api.com/latest-update`)).json();
+			const event = { // define the change event (which will update the cache)
+				type: 'put', // this indicates that the event includes the new data value
+				id: update.id, // the primary key of the record that updated
+				value: update.value, // the new value of the record that updated
+				timestamp: update.timestamp, // the timestamp of when the data change occurred
+			};
+			yield event; // this returns the event, notifying the cache of the change
+			await new Promise((resolve) => setTimeout(resolve, 1000)); // wait one second before polling again
+		}
+	}
+	async get() {
+...
+```
+
+Notification events should always include an `id` property to indicate the primary key of the updated record. The event should have a `value` property for `put` and `message` event types. The `timestamp` is optional and can be used to indicate the exact timestamp of the change. The following event `type`s are supported:
+
+* `put` - This indicates that the record has been updated and provides the new value of the record.
+* `invalidate` - Alternately, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
+* `delete` - This indicates that the record has been deleted.
+* `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time).
+* `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property.
+
+And the following properties can be defined on event objects:
+
+* `type`: The event type as described above.
+* `id`: The primary key of the record that updated.
+* `value`: The new value of the record that updated (for `put` and `message`).
+* `writes`: An array of event properties that are part of a transaction (used in conjunction with the `transaction` event type).
+* `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
+* `timestamp`: The timestamp of when the data change occurred.
+
+With an active external data source with a `subscribe` method, the data source will proactively notify the cache, ensuring a fresh and efficient active cache. Note that with an active data source, we still use the `sourcedFrom` method to register the source for a caching table, and the table will automatically detect and call the subscribe method on the data source.
+
+By default, Harper will only run the subscribe method on one thread. Harper is multi-threaded and normally runs many concurrent worker threads, but typically running a subscription on multiple threads can introduce overlap in notifications and race conditions, so running a subscription on a single thread is preferable. However, if you want to enable subscribe on multiple threads, you can define a `static subscribeOnThisThread` method to specify if the subscription should run on the current thread:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	static subscribeOnThisThread(threadIndex) {
+		return threadIndex < 2; // run on two threads (the first two threads)
+	}
+	async *subscribe() {
+		...
+```
+
+An alternative to using asynchronous generators is to use a subscription stream and send events to it. A default subscription stream (that doesn't generate its own events) is available from the Resource's default subscribe method:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	subscribe() {
+		const subscription = super.subscribe();
+		setupListeningToRemoteService().on('update', (event) => {
+			subscription.send(event);
+		});
+		return subscription;
+	}
+}
+```
+
+## Downstream Caching
+
+It is highly recommended that you utilize the [REST interface](../rest) for accessing caching tables, as it facilitates downstream caching for clients. Timestamps are recorded with all cached entries. Timestamps are then used for incoming [REST requests to specify the `ETag` in the response](../rest#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data, based on the timestamp/`ETag` of the entries that are cached in Harper. Caching tables also have [subscription capabilities](./caching#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of Harper, as either passive or active caches.
+
+## Write-Through Caching
+
+The cache we have defined so far only has data flowing from the data source to the cache. However, you may wish to support write methods, so that writes to the cache table flow through to the underlying canonical data source, as well as populate the cache. This can be accomplished by implementing the standard write methods, like `put` and `delete`.
+If you were using an API with standard RESTful methods, you can pass writes through to the data source like this:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+	async put(data) {
+		await fetch(`http://some-api.com/${this.getId()}`, {
+			method: 'PUT',
+			body: JSON.stringify(data),
+		});
+	}
+	async delete() {
+		await fetch(`http://some-api.com/${this.getId()}`, {
+			method: 'DELETE',
+		});
+	}
+	...
+```
+
+When doing an insert or update to the MyCache table, the data will be sent to the underlying data source through the `put` method, and the new record value will be stored in the cache as well.
+
+### Loading from Source in Methods
+
+When you are using a caching table, it is important to remember that any resource methods besides `get()` will not automatically load data from the source. If you have defined a `put()`, `post()`, or `delete()` method and you need the source data, you can ensure it is loaded by calling the `ensureLoaded()` method. For example, if you want to modify the existing record from the source, adding a property to it:
+
+```javascript
+class MyCache extends tables.MyCache {
+	async post(data) {
+		// if the data is not cached locally, retrieves from source:
+		await this.ensureLoaded();
+		// now we can be sure that the data is loaded, and can access properties
+		this.quantity = this.quantity - data.purchases;
+	}
+}
+```
+
+### Subscribing to Caching Tables
+
+You can subscribe to a caching table just like any other table. The one difference is that normal tables do not usually have `invalidate` events, but an active caching table may have `invalidate` events. Again, this event type gives listeners an opportunity to choose whether or not to actually retrieve the value that changed.
+
+### Passive-Active Updates
+
+With our passive update examples, we have provided a data source handler with a `get()` method that returns the specific requested record as the response. However, we can also actively update other records in our response handler (if our data source provides data that should be propagated to other related records). This can be done transactionally, to ensure that all updates occur atomically. The context that is provided to the data source holds the transaction information, so we can simply pass the context to any update/write methods that we call. For example, let's say we are loading a blog post, which also includes comment records:
+
+```javascript
+const { Post, Comment } = tables;
+class BlogSource extends Resource {
+	async get() {
+		const post = await (await fetch(`http://my-blog-server/${this.getId()}`)).json();
+		for (let comment of post.comments) {
+			await Comment.put(comment, this); // save this comment as part of our current context and transaction
+		}
+		return post;
+	}
+}
+Post.sourcedFrom(BlogSource);
+```
+
+Here both the update to the post and the update to the comments will be atomically/transactionally committed together with the same timestamp.
+
+## Cache-Control header
+
+When interacting with cached data, you can also use the `Cache-Control` request header to specify certain caching behaviors. When performing a PUT (or POST) method, you can use the `max-age` directive to indicate how long the resource should be cached (until stale):
+
+```http
+PUT /my-resource/id
+Cache-Control: max-age=86400
+```
+
+You can use the `only-if-cached` directive on GET requests to only return a resource if it is cached (otherwise it will return a 504).
+Note that if the entry is not cached, this will still trigger a request for the source data from the data source. If you do not want source data retrieved, you can add the `no-store` directive. You can also use the `no-cache` directive if you do not want to use the cached resource. If you want to check whether there is a cached resource without triggering a request to the data source:
+
+```http
+GET /my-resource/id
+Cache-Control: only-if-cached, no-store
+```
+
+You may also use `stale-if-error` to indicate that it is acceptable to return a stale cached resource when the data source returns an error (network connection error, 500, 502, 503, or 504). The `must-revalidate` directive can indicate that a stale cached resource cannot be returned, even when the data source has an error (by default, a stale cached resource is returned when there is a network connection error).
+
+## Caching Flow
+
+It may be helpful to understand the flow of a cache request. When a request is made to a caching table:
+
+* Harper will first create a resource instance to handle the process, and ensure that the data is loaded for the resource instance. To do this, it will first check if the record is in the table/cache.
+  * If the record is not in the cache, Harper will first check if there is a current request to get the record from the source. If there is, Harper will wait for that request to complete and return the record from the cache.
+  * If not, Harper will call the `get()` method on the source to retrieve the record. The record will then be stored in the cache.
+  * If the record is in the cache, Harper will check if the record is stale. If the record is not stale, Harper will immediately return the record from the cache. If the record is stale, Harper will call the `get()` method on the source to retrieve the record.
+  * The record will then be stored in the cache. This writes the record to the cache in a separate asynchronous/background write-behind transaction, so it does not block the current request; the data is returned immediately once it is available.
+* The `get()` method will be called on the resource instance to return the record to the client (or perform any querying on the record). If this method is overridden, it will be called at this time.
+
+### Caching Flow with Write-Through
+
+When writes are performed on a caching table (in a `put()` or `post()` method, for example), the flow is slightly different:
+
+* Harper will have first created a resource instance to handle the process, and this resource instance will be the current `this` for a call to `put()` or `post()`.
+* If a `put()` or `update()` is called, for example, this action will be recorded in the current transaction.
+* Once the transaction is committed (which is done automatically as the request handler completes), the transaction's writes will be sent to the source to update the data.
+  * The local writes will wait for the source to confirm that the writes have completed (note that this effectively allows you to perform a two-phase transactional write to the source, and the source can confirm the writes have completed before the transaction is committed locally).
+  * The transaction's writes will then be written to the local caching table.
+* The transaction handler will wait for the local commit to be written, then the transaction will be resolved and a response will be sent to the client.
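+
+As a minimal sketch of this write-through flow (reusing the hypothetical `MyCache` table and `ThirdPartyAPI` source from the examples above; the record shape is illustrative), a write to the caching table resolves only after the source confirms it:
+
+```javascript
+const { MyCache } = tables;
+
+async function updateInventory() {
+	// put() flows through to ThirdPartyAPI.put() first; once the source confirms the write,
+	// the record is committed to the local caching table and this promise resolves
+	await MyCache.put({ id: 'some-id', quantity: 5 });
+}
+```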
diff --git a/site/versioned_docs/version-4.4/developers/applications/debugging.md b/site/versioned_docs/version-4.4/developers/applications/debugging.md
new file mode 100644
index 00000000..c7c085bf
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/debugging.md
@@ -0,0 +1,39 @@
+---
+title: Debugging Applications
+---
+
+# Debugging Applications
+
+Harper components and applications run inside the Harper process, which is a standard Node.js process that can be debugged with standard JavaScript development tools like Chrome's devtools, VSCode, and WebStorm. Debugging can be performed by launching the Harper entry script with your IDE, or you can start Harper in dev mode and connect your debugger to the running process (it defaults to the standard port 9229):
+
+```
+harperdb dev
+# or to run and debug a specific app
+harperdb dev /path/to/app
+```
+
+Once you have connected a debugger, you may set breakpoints in your application and fully debug it. Note that when using the `dev` command from the CLI, Harper runs in single-threaded mode. This would not be appropriate for production use, but makes it easier to debug applications.
+
+For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use Harper's logging facilities, so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by Harper. This logger can be used to output messages directly to the Harper log using the standardized logging level functions described below. The log level can be set in the [Harper Configuration File](../../deployments/configuration).
+
+Harper Logger Functions
+
+* `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
+* `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
+* `info(message)`: Write an 'info' level log, if the configured level allows for it.
+* `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
+* `error(message)`: Write an 'error' level log, if the configured level allows for it.
+* `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
+* `notify(message)`: Write a 'notify' level log.
+
+For example, you can log a warning:
+
+```javascript
+logger.warn('You have been warned');
+```
+
+If you want to ensure a message is logged, you can use `notify`, as these messages will appear in the log regardless of the configured log level.
+
+## Viewing the Log
+
+The Harper log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the Studio Status page. Additionally, you can use the [`read_log` operation](../operations-api/logs) to query the Harper log.
diff --git a/site/versioned_docs/version-4.4/developers/applications/define-routes.md b/site/versioned_docs/version-4.4/developers/applications/define-routes.md
new file mode 100644
index 00000000..9d3a1526
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/define-routes.md
@@ -0,0 +1,118 @@
+---
+title: Define Fastify Routes
+---
+
+# Define Fastify Routes
+
+Harper’s applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints.
+While we generally recommend building your endpoints/APIs with Harper's [REST interface](../rest) for better performance and standards compliance, Fastify routes can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.
+
+The fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):
+
+```yaml
+fastifyRoutes: # This loads files that define fastify routes using fastify's auto-loader
+  files: routes/*.js # specify the location of route definition modules
+  path: . # relative to the app-name, like http://server/app-name/route-name
+```
+
+By default, route URLs are configured to be:
+
+* \[**Instance URL**]:\[**HTTP Port**]/\[**Project Name**]/\[**Route URL**]
+
+However, you can specify the path to be `/` if you wish to have your routes handling the root path of incoming URLs.
+
+* The route below, using the default config, within the **dogs** project, with a route of **breeds**, would be available at **http://localhost:9926/dogs/breeds**.
+
+In effect, this route is just a pass-through to Harper. The same result could have been achieved by hitting the core Harper API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/',
+		method: 'POST',
+		preValidation: hdbCore.preValidation,
+		handler: hdbCore.request,
+	});
+};
+```
+
+## Custom Handlers
+
+For endpoints where you want to execute multiple operations against Harper, or perform additional processing (like an ML classification, an aggregation, or a call to a 3rd party API), you can define your own logic in the handler. The function below will execute a query against the dogs table and filter the results to return only those dogs over 4 years of age.
+
+**IMPORTANT: This route has NO preValidation and uses hdbCore.requestWithoutAuthentication, which, as the name implies, bypasses all user authentication. See the security concerns and mitigations in the “helper methods” section, below.**
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/:id',
+		method: 'GET',
+		handler: async (request) => {
+			request.body = {
+				operation: 'sql',
+				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`,
+			};
+
+			const result = await hdbCore.requestWithoutAuthentication(request);
+			return result.filter((dog) => dog.age > 4);
+		},
+	});
+};
+```
+
+## Custom preValidation Hooks
+
+The simple example above was just a pass-through to Harper; the exact same result could have been achieved by hitting the core Harper API. But for many applications, you may want to authenticate the user using custom logic you write, or by conferring with a 3rd party service. Custom preValidation hooks let you do just that.
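+
+For reference, such a hook is just a function that inspects the request and throws (or replies with an error) to reject it. Here is a minimal, hypothetical sketch of the `customValidation` helper used below; the header check and status code are illustrative assumptions, not a prescribed implementation:
+
+```javascript
+// helpers/customValidation.js: a hypothetical sketch; substitute your own auth logic
+export default async (request, logger) => {
+	const token = request.headers.authorization;
+	if (!token) {
+		logger.warn('Rejected request with no authorization header');
+		const error = new Error('Unauthorized');
+		error.statusCode = 401; // Fastify uses statusCode on a thrown error for the reply status
+		throw error;
+	}
+	// otherwise, optionally confer with a third-party service here before allowing the request
+};
+```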
+
+Below is an example of a route that uses a custom validation hook:
+
+```javascript
+import customValidation from '../helpers/customValidation';
+
+export default async (server, { hdbCore, logger }) => {
+	server.route({
+		url: '/:id',
+		method: 'GET',
+		preValidation: (request) => customValidation(request, logger),
+		handler: (request) => {
+			request.body = {
+				operation: 'sql',
+				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`,
+			};
+
+			return hdbCore.requestWithoutAuthentication(request);
+		},
+	});
+};
+```
+
+Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](./define-routes#helper-methods).
+
+## Helper Methods
+
+When declaring routes, you are given access to two helper methods: hdbCore and logger.
+
+**hdbCore**
+
+hdbCore contains three functions that allow you to authenticate an inbound request and execute operations against Harper directly, without making a separate network call to the standard Operations API.
+
+* **preValidation**
+
+  This is an array of functions used for fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard Harper Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user.**
+* **request**
+
+  This will execute a request with Harper using the operations API. The `request.body` should contain a standard Harper operation, and must also include the `hdb_user` property that preValidation added to `request.body`.
+* **requestWithoutAuthentication**
+
+  Executes a request against Harper without any security checks around whether the inbound user is allowed to make this request. For security purposes, you should always take the following precautions when using this method:
+
+  * Properly handle user-submitted values, including url params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.
+
+**logger**
+
+This helper allows you to write directly to the log file, hdb.log. It’s useful for debugging during development, although you may also use the console logger. There are five functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.
+
+* logger.trace('Starting the handler for /dogs')
+* logger.debug('This should only fire once')
+* logger.warn('This should never ever fire')
+* logger.error('This did not go well')
+* logger.fatal('This did not go very well at all')
diff --git a/site/versioned_docs/version-4.4/developers/applications/defining-roles.md b/site/versioned_docs/version-4.4/developers/applications/defining-roles.md
new file mode 100644
index 00000000..d6c766fc
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/defining-roles.md
@@ -0,0 +1,51 @@
+---
+title: Defining Roles
+---
+
+In addition to [defining a database schema](./defining-schemas), you can also define roles in your application. Roles are a way to group permissions together and assign them to users as part of Harper's [role based access control](../security/users-and-roles).
+An application component may declare roles that should exist for the application in a roles configuration file. To use this, first specify your roles config file in the `config.yaml` in your application directory:
+
+```yaml
+roles:
+  files: roles.yaml
+```
+
+Now you can create a roles.yaml in your application directory:
+
+```yaml
+declared-role:
+  super_user: false # This is a boolean value that indicates if the role is a super user or not
+  # Now we can grant the permissions to databases, here we grant permissions to the default data database
+  data: # This is the same structure as the role object that is used in the roles operations APIs
+    TableOne:
+      read: true
+      insert: true
+    TableTwo:
+      read: true
+      insert: false
+      update: true
+      delete: true
+      attributes:
+        name:
+          read: true
+          insert: false
+          update: true
+```
+
+With this in place, when Harper starts up, it will create the roles in the roles.yaml file if they do not already exist. If they do exist, it will update the roles with the new permissions. This allows you to manage your roles in your application code and have them automatically created or updated when the application starts.
+
+The structure of the roles.yaml file is:
+
+```yaml
+<role-name>:
+  permission: # contains the permissions for the role; this structure is optional, and you can place flags like super_user here as a shortcut
+    super_user: <boolean>
+  <database-name>: # each database with permissions can be added as a named property on the role
+    tables: # this structure is optional, and table names can be placed directly under the database as a shortcut
+      <table-name>:
+        read: <boolean> # indicates if the role has read permission on this table
+        insert: <boolean> # indicates if the role has insert permission on this table
+        update: <boolean> # indicates if the role has update permission on this table
+        delete: <boolean> # indicates if the role has delete permission on this table
+        attributes:
+          <attribute-name>: # individual attributes can have permissions as well
+            read: <boolean>
+            insert: <boolean>
+            update: <boolean>
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/developers/applications/defining-schemas.md b/site/versioned_docs/version-4.4/developers/applications/defining-schemas.md
new file mode 100644
index 00000000..35ef736d
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/defining-schemas.md
@@ -0,0 +1,219 @@
+---
+title: Defining Schemas
+---
+
+# Defining Schemas
+
+Schemas define tables and their attributes. Schemas can be declaratively defined in Harper using GraphQL schema definitions. Schema definitions can be used to ensure that the tables required by an application exist and have the appropriate attributes. Schemas can define the primary key, data types for attributes, whether they are required, and which attributes should be indexed. The [introduction to applications](./) provides a helpful introduction to how to use schemas as part of database application development.
+
+Schemas can be used to define the expected structure of data, but are also highly flexible; they support heterogeneous data structures and by default allow data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).
+
+An example schema that defines a couple of tables might look like:
+
+```graphql
+# schema.graphql:
+type Dog @table {
+  id: ID @primaryKey
+  name: String
+  breed: String
+  age: Int
+}
+
+type Breed @table {
+  id: ID @primaryKey
+}
+```
+
+In this example, you can see that we specified the expected data structure for records in the Dog and Breed tables. For example, this will enforce that Dog records are required to have a `name` property with a string (or null, unless the type is specified as non-nullable). This does not preclude records from having additional properties (see `@sealed` for preventing additional properties). For example, some Dog records could also optionally include a `favoriteTrick` property.
+
+On this page, we describe the specific directives that Harper uses for defining tables and attributes in a schema.
+
+### Type Directives
+
+#### `@table`
+
+The schema for a table is defined using a GraphQL type definition with a `@table` directive:
+
+```graphql
+type TableName @table
+```
+
+By default the table name is inherited from the type name (in this case the table name would be "TableName"). The `@table` directive supports several optional arguments, all of which can be freely combined:
+
+* `@table(table: "table_name")` - This allows you to explicitly specify the table name.
+* `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database.
+* `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds.
+* `@table(audit: true)` - This enables the audit log for the table so that a history of record changes is recorded. This defaults to the [configuration file's setting for `auditLog`](../../deployments/configuration#logging).
+
+#### `@export`
+
+This indicates that the specified table should be exported as a resource that is accessible as an externally available endpoint, through REST, MQTT, or any of the external resource APIs.
+
+This directive also accepts a `name` parameter to specify the name that should be used for the exported resource (how it will appear in the URL path). For example:
+
+```
+type MyTable @table @export(name: "my-table")
+```
+
+This table would be available at the URL path `/my-table/`. Without the `name` parameter, the exported name defaults to the name of the table type ("MyTable" in this example).
+
+### Relationships: `@relationship`
+
+Defining relationships is the foundation of using "join" queries in Harper. A relationship defines how one table relates to another table using a foreign key. Using the `@relationship` directive will define a property as a computed property, which resolves to a record/instance of a target type, based on the referenced attribute, which can be in this table or the target table. The `@relationship` directive must be used in combination with an attribute whose type references another table.
+
+#### `@relationship(from: attribute)`
+
+This defines a relationship where the foreign key is defined in this table and relates to the primary key of the target table. If the foreign key is single-valued, this establishes a many-to-one relationship with the target table. The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship. For example, we can define a foreign key that references another table and then define the relationship.
+Here we create a `brandId` attribute that will be our foreign key (it will hold an id that references the primary key of the Brand table), and we define a relationship to the `Brand` table through the `brand` attribute:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  brandId: ID @indexed
+  brand: Brand @relationship(from: brandId)
+}
+type Brand @table @export {
+  id: ID @primaryKey
+}
+```
+
+Once this is defined, we can use the `brand` attribute as a [property in our product instances](../../technical-details/reference/resource), allowing us to query by `brand` and select brand attributes as returned properties in [query results](../rest).
+
+Again, the foreign key may be a multi-valued array (an array of keys referencing the target table records). For example, if we had a list of features that references a Feature table:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  featureIds: [ID] @indexed # array of ids
+  features: [Feature] @relationship(from: featureIds) # array of referenced feature records
+}
+type Feature @table {
+  id: ID @primaryKey
+  ...
+}
+```
+
+#### `@relationship(to: attribute)`
+
+This defines a relationship where the foreign key is defined in the target table and relates to the primary key of this table. If the foreign key is single-valued, this establishes a one-to-many relationship with the target table. Note that the target table type must be an array element type (like `[Table]`). The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship. For example, we can define a reciprocal relationship from the example above, adding a relationship from brand back to product. Here we continue to use the `brandId` attribute from the `Product` schema, and we define a relationship to the `Product` table through the `products` attribute:
+
+```graphql
+type Brand @table @export {
+  id: ID @primaryKey
+  name: String
+  products: [Product] @relationship(to: brandId)
+}
+```
+
+Once this is defined, we can use the `products` attribute as a property in our brand instances, allowing us to query by `products` and select product attributes as returned properties in query results.
+
+Note that schemas can also reference themselves with relationships, allowing records to define relationships like parent-child relationships between records in the same table. Also note that, for a many-to-many relationship, you must not combine the `to` and `from` properties in the same relationship directive.
+
+### Computed Properties: `@computed`
+
+The `@computed` directive specifies that a field is computed based on other fields in the record. This is useful for creating derived fields that are not stored in the database, but are computed when the specific record fields are queried/accessed. The `@computed` directive must be used in combination with an expression or function that computes the value of the field. For example:
+
+```graphql
+type Product @table {
+  id: ID @primaryKey
+  price: Float
+  taxRate: Float
+  totalPrice: Float @computed(from: "price + (price * taxRate)")
+}
+```
+
+The `from` argument specifies the expression that computes the value of the field. The expression can reference other fields in the record. The expression is evaluated when the record is queried or indexed.
+
+The `@computed` directive may also be defined in a JavaScript module, which is useful for more complex computations. You can specify a computed attribute, and then define the function with the `setComputedAttribute` method.
For example:
+
+```graphql
+type Product @table {
+...
+  totalPrice: Float @computed
+}
+```
+
+```javascript
+tables.Product.setComputedAttribute('totalPrice', (record) => {
+  return record.price + (record.price * record.taxRate);
+});
+```
+
+Computed properties may also be indexed, which provides a powerful mechanism for creating indexes on derived fields with custom querying capabilities. This can provide a mechanism for composite indexes, custom full-text indexing, vector indexing, or other custom indexing strategies. A computed property can be indexed by adding the `@indexed` directive to the computed property. When using a JavaScript module for a computed property that is indexed, it is highly recommended that you specify a `version` argument to ensure that the computed attribute is re-evaluated when the function is updated. For example:
+
+```graphql
+type Product @table {
+...
+  totalPrice: Float @computed(version: 1) @indexed
+}
+```
+
+If you were to update the `setComputedAttribute` function for the `totalPrice` attribute to use a new formula, you must increment the `version` argument to ensure that the computed attribute is re-indexed (note that on a large database, re-indexing may be a lengthy operation). Failing to increment the `version` argument with a modified function can result in an inconsistent index. The computed function must be deterministic, and should not have side effects, as it may be re-evaluated multiple times during indexing.
+
+Note that computed properties will not be included by default in a query result; you must explicitly include them in query results using the `select` query function.
+
+As another example of a custom computed index, we could index all the comma-separated words in a `tags` property (similar techniques are used for full-text indexing):
+
+```graphql
+type Product @table {
+  id: ID @primaryKey
+  tags: String # comma delimited set of tags
+  tagsSeparated: [String] @computed(from: "tags.split(/\\s*,\\s*/)") @indexed # split and index the tags
+}
+```
+
+For more in-depth information on computed properties, see our blog post on [how to create custom indexes with computed properties](https://www.harpersystems.dev/development/tutorials/how-to-create-custom-indexes-with-computed-properties).
+
+### Field Directives
+
+Field directives provide additional information about each attribute in a table type definition.
+
+#### `@primaryKey`
+
+The `@primaryKey` directive specifies that an attribute is the primary key for a table. Primary keys must be unique, and when records are created, a primary key will be auto-generated if none is provided. When a primary key is auto-generated, it will be a UUID (as a string) if the primary key type is `String` or `ID`. If the primary key type is `Int`, `Long`, or `Any`, the primary key will be an auto-incremented number. Using numeric primary keys is more efficient than using UUIDs. Note that if the type is `Int`, the primary key will be limited to 32 bits, which can be limiting and problematic for large tables. If you will be relying on auto-generated keys, it is recommended that you use a primary key type of `Long` or `Any` (the latter will allow you to also use strings as primary keys).
+
+#### `@indexed`
+
+The `@indexed` directive specifies that an attribute should be indexed. This is necessary if you want to execute queries using this attribute (whether that is through RESTful query parameters, SQL, or NoSQL operations).
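+
+As a quick illustration (using the `Dog` table from the applications guide; the choice of which attributes to index here is ours), indexing is what makes an attribute queryable:
+
+```graphql
+type Dog @table @export {
+  id: ID @primaryKey
+  name: String @indexed
+  age: Int
+}
+```
+
+With `name` indexed, a REST query such as `GET /Dog/?name=Harper` can be executed; querying by the unindexed `age` attribute is not supported until it is also tagged with `@indexed`.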
+
+#### `@createdTime`
+
+The `@createdTime` directive indicates that this property should be assigned a timestamp of the creation time of the record (in epoch milliseconds).
+
+#### `@updatedTime`
+
+The `@updatedTime` directive indicates that this property should be assigned a timestamp of the last update time of the record (in epoch milliseconds).
+
+#### `@sealed`
+
+The `@sealed` directive specifies that no additional properties should be allowed on records besides those specified in the type itself.
+
+### Defined vs Dynamic Schemas
+
+If you do not define a schema for a table and create a table through the operations API (without specifying attributes) or the studio, such a table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema). It is generally best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.
+
+### Field Types
+
+Harper supports the following field types in addition to user-defined (object) types:
+
+* `String`: String/text.
+* `Int`: A 32-bit signed integer (from -2147483648 to 2147483647).
+* `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992).
+* `Float`: Any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). (Note that all numbers are stored in the most compact representation available.)
+* `BigInt`: Any integer (negative or positive) with less than 300 digits. (Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately.)
+* `Boolean`: true or false.
+* `ID`: A string (but indicates it is not intended to be human readable).
+* `Any`: Any primitive, object, or array is allowed.
+* `Date`: A Date object.
+* `Bytes`: Binary data (as a Buffer or Uint8Array).
+
+#### Renaming Tables
+
+It is important to note that Harper does not currently support renaming tables. If you change the name of a table in your schema definition, this will result in the creation of a new, empty table.
+
+### OpenAPI Specification
+
+_The_ [_OpenAPI Specification_](https://spec.openapis.org/oas/v3.1.0) _defines a standard, programming language-agnostic interface description for HTTP APIs, which allows both humans and computers to discover and understand the capabilities of a service without requiring access to source code, additional documentation, or inspection of network traffic._
+
+If a set of endpoints is configured through a Harper GraphQL schema, those endpoints can be described using the default REST endpoint `GET /openapi`.
+
+_Note: The `/openapi` endpoint should only be used as a starting guide; it may not cover all the elements of an endpoint._
diff --git a/site/versioned_docs/version-4.4/developers/applications/example-projects.md b/site/versioned_docs/version-4.4/developers/applications/example-projects.md
new file mode 100644
index 00000000..466ce267
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/example-projects.md
@@ -0,0 +1,37 @@
+---
+title: Example Projects
+---
+
+# Example Projects
+
+**Library of example Harper applications and components:**
+
+* [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang
+
+* [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams
+
+* [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos
+
+* [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock
+
+* [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush
+
+* [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden
+
+* [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft
+
+* [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber
+
+* [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty
+
+* [How I used Harper Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj
+
+* [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary
+
+* [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi
+
+* [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp
+
+* [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David
+
+* [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select* Podcast
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/developers/applications/index.md b/site/versioned_docs/version-4.4/developers/applications/index.md
new file mode 100644
index 00000000..6c40ca60
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/index.md
@@ -0,0 +1,378 @@
+---
+title: Applications
+---
+
+# Applications
+
+## Overview of Harper Applications
+
+Harper is more than a database; it's a distributed clustering platform allowing you to package your schema, endpoints, and application logic
and deploy them to an entire fleet of Harper instances optimized for on-the-edge scalable data delivery.
+
+In this guide, we are going to explore the highly extensible architecture that Harper provides by building a Harper component, a fundamental building block of the Harper ecosystem.
+
+When working through this guide, we recommend you use the [Harper Application Template](https://github.com/HarperDB/application-template) repo as a reference.
+
+## Understanding the Component Application Architecture
+
+Harper provides several types of components. Any package that is added to Harper is called a "component", and components are generally categorized as either "applications", which deliver a set of endpoints for users, or "extensions", which are building blocks for features like authentication, additional protocols, and connectors that can be used by other components. Components can be added to the `hdb/components` directory and will be loaded by Harper when it starts. Components that are remotely deployed to Harper (through the studio or the operations API) are installed into the `hdb/node_modules` directory. Using `harperdb run .` or `harperdb dev .` allows us to specifically load a certain application in addition to any that have been manually added to `hdb/components` or installed (in `hdb/node_modules`).
+
+```mermaid
+flowchart LR
+  Client(Client)-->Endpoints
+  Client(Client)-->HTTP
+  Client(Client)-->Extensions
+  subgraph Harper
+    direction TB
+    Applications(Applications)-- "Schemas" --> Tables[(Tables)]
+    Applications-->Endpoints[/Custom Endpoints/]
+    Applications-->Extensions
+    Endpoints-->Tables
+    HTTP[/REST/HTTP/]-->Tables
+    Extensions[/Extensions/]-->Tables
+  end
+```
+
+## Getting up and Running
+
+### Pre-Requisites
+
+We assume you are running Harper version 4.2 or greater, which supports the Harper Application architecture (in previous versions, this functionality was 'custom functions').
+
+### Scaffolding our Application Directory
+
+Let's create and initialize a new directory for our application. It is recommended that you start by using the [Harper application template](https://github.com/HarperDB/application-template). Assuming you have `git` installed, you can create your project directory by cloning:
+
+```shell
+> git clone https://github.com/HarperDB/application-template my-app
+> cd my-app
+```
+
+
+You can also start with an empty application directory if you'd prefer.
+
+To create your own application from scratch, you may want to initialize it as an npm package with the `type` field set to `module` in the `package.json` so that you can use the ECMAScript module syntax used in this tutorial:
+
+```shell
+> mkdir my-app
+> cd my-app
+> npm init -y esnext
+```
+
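+
+For reference, the resulting `package.json` should include the `type` field; a minimal example (the name and version here are placeholders) might look like:
+
+```json
+{
+  "name": "my-app",
+  "version": "1.0.0",
+  "type": "module"
+}
+```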
+ +
+
+If you want to version control your application code, you can adjust the remote URL to your repository.
+
+Here's an example for a GitHub repo:
+
+```shell
+> git remote set-url origin git@github.com:your-account/your-repo.git
+```
+
+Developing your application locally and then committing it to source control is a great way to manage your code and configuration, and you can then [directly deploy from your repository](./#deploying-your-application).
+
+
+## Creating our first Table
+
+The core of a Harper application is the database, so let's create a database table!
+
+A quick and expressive way to define a table is through a [GraphQL Schema](https://graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we add a type named `Dog` with the `@table` directive (you can remove the example table in the template):
+
+```graphql
+type Dog @table {
+  # properties will go here soon
+}
+```
+
+And then we'll add a primary key named `id` of type `ID`:
+
+_(Note: A GraphQL schema is a fast method to define tables in Harper, but you are by no means required to use GraphQL to query your application, nor should you necessarily do so)_
+
+```graphql
+type Dog @table {
+  id: ID @primaryKey
+}
+```
+
+Now we tell Harper to run this as an application:
+
+```shell
+> harperdb dev . # tell the Harper CLI to run the current directory as an application in dev mode
+```
+
+Harper will now create the `Dog` table and the `id` attribute we just defined. Not only is this an easy way to create a table, but this schema is included in our application, which will ensure that this table exists wherever we deploy this application (to any Harper instance).
+
+## Adding Attributes to our Table
+
+Next, let's expand our `Dog` table by adding additional typed attributes for dog `name`, `breed` and `age`.
+
+```graphql
+type Dog @table {
+  id: ID @primaryKey
+  name: String
+  breed: String
+  age: Int
+}
+```
+
+This will ensure that new records must have these properties with these types.
+
+Because we ran `harperdb dev .` earlier (dev mode), Harper is now monitoring the contents of our application directory for changes and reloading when they occur. This means that once we save our schema file with these new attributes, Harper will automatically reload our application, read `my-app/schema.graphql`, and update the `Dog` table and attributes we just defined. Dev mode will also ensure that any logging or errors are immediately displayed in the console (rather than only in the log file).
+
+As a NoSQL database, Harper supports heterogeneous records (also referred to as documents), so you can freely specify additional properties on any record. If you do want to restrict the records to only defined properties, you can always do that by adding the `@sealed` directive:
+
+```graphql
+type Dog @table @sealed {
+  id: ID @primaryKey
+  name: String
+  breed: String
+  age: Int
+  tricks: [String]
+}
+```
+
+If you are using Harper Studio, you can now add JSON-formatted records to this new table in the studio or upload data as CSV from a local file or URL. A third, more advanced, way to add data to your database is to use the [operations API](../operations-api/), which provides full administrative control over your new Harper instance and tables.
+
+## Adding an Endpoint
+
+Now that we have a running application with a database (with data, if you imported any), let's make this data accessible from a RESTful URL by adding an endpoint. To do this, we simply add the `@export` directive to our `Dog` table:
+
+```graphql
+type Dog @table @export {
+  id: ID @primaryKey
+  name: String
+  breed: String
+  age: Int
+  tricks: [String]
+}
+```
+
+By default the application HTTP server port is `9926` (this can be [configured here](../../deployments/configuration#http)), so the local URL would be [http://localhost:9926/Dog/](http://localhost:9926/Dog/) with a full REST API.
We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, we can use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id:
+
+```http
+POST /Dog/
+Content-Type: application/json
+
+{
+  "name": "Harper",
+  "breed": "Labrador",
+  "age": 3,
+  "tricks": ["sits"]
+}
+```
+
+With this, a record will be created and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/` to view that record. Alternatively, the curl command `curl http://localhost:9926/Dog/` will achieve the same thing.
+
+## Authenticating Endpoints
+
+These endpoints automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../security/) for more information on different levels of access.
+
+By default, Harper also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make local development simple. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../../deployments/configuration#authentication).
+
+### Content Negotiation
+
+These endpoints support various content types, including `JSON`, `CBOR`, `MessagePack` and `CSV`. Simply include an `Accept` header in your requests with the preferred content type. We recommend `CBOR` as a compact, efficient encoding with rich data types, but `JSON` is familiar and great for web application development, and `CSV` can be useful for exporting data to spreadsheets or other processing.
+
+Harper works with other important standard HTTP headers as well, and these endpoints even support caching interactions:
+
+```
+Authorization: Basic 
+Accept: application/cbor
+If-None-Match: "etag-id" # browsers can automatically provide this
+```
+
+## Querying
+
+Querying your application database is straightforward and easy, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../rest). Simple queries can be crafted through [URL query parameters](https://en.wikipedia.org/wiki/Query_string).
+
+In order to maintain reasonable query speed on a database as it grows in size, it is critical to select and establish the proper indexes. So, before we add the `@export` declaration to our `Dog` table and begin querying it, let's take a moment to target some table properties for indexing. We'll use `name` and `breed` as indexed table properties on our `Dog` table. All we need to do to accomplish this is tag these properties with the `@indexed` directive:
+
+```graphql
+type Dog @table {
+  id: ID @primaryKey
+  name: String @indexed
+  breed: String @indexed
+  owner: String
+  age: Int
+  tricks: [String]
+}
+```
+
+And finally, we'll add the `@export` directive to expose the table as a RESTful endpoint:
+
+```graphql
+type Dog @table @export {
+  id: ID @primaryKey
+  name: String @indexed
+  breed: String @indexed
+  owner: String
+  age: Int
+  tricks: [String]
+}
+```
+
+Now we can start querying.
Again, we simply access the endpoint with query parameters (basic GET requests), like:
+
+```
+http://localhost:9926/Dog/?name=Harper
+http://localhost:9926/Dog/?breed=Labrador
+http://localhost:9926/Dog/?breed=Husky&name=Balto&select=id,name,breed
+```
+
+Congratulations, you have now created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../rest) and see the [Schema reference](./defining-schemas) for more options for defining schemas.
+
+> Additionally, you may now use GraphQL (over HTTP) to create queries. See the documentation for that new feature [here](../../technical-details/reference/graphql).
+
+## Deploying your Application
+
+This guide assumes that you're building a Harper application locally. If you have a cloud instance available, you can deploy it by doing the following:
+
+* Commit and push your application component directory code (i.e., the `my-app` directory) to a GitHub repo. In this tutorial we started with a clone of the application-template. To commit and push to your own repository, change the origin to your repo: `git remote set-url origin git@github.com:your-account/your-repo.git`
+* Go to the applications section of your target cloud instance in the Harper Studio.
+* In the left-hand menu of the applications IDE, click 'deploy' and specify a package location reference that follows the [npm package specification](https://docs.npmjs.com/cli/v8/using-npm/package-spec) (i.e., a string like `HarperDB/Application-Template` or a URL like `https://github.com/HarperDB/application-template` that npm knows how to install).
+
+You can also deploy your application from your repository by directly using the [`deploy_component` operation](../operations-api/components#deploy-component).
+
+Once you have deployed your application to a Harper cloud instance, you can start scaling your application by adding additional instances in other regions.
+
+With a global traffic manager/load balancer configured, you can distribute incoming requests to the appropriate server. You can deploy and re-deploy your application to all the nodes in your mesh.
+
+Now, with an application that you can deploy, update, and re-deploy, you have an application that is horizontally and globally scalable!
+
+## Custom Functionality with JavaScript
+
+So far we have built an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. This gives you tremendous flexibility and control over how data is accessed and modified in Harper. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality. Let's add a property to the dog records, when they are returned, that includes their age in human years. In Harper, data is accessed through our [Resource API](../../technical-details/reference/resource), a standard interface to access data sources and tables and make them available to endpoints. Database tables are `Resource` classes, so extending the functionality of a table is as simple as extending their class.
+
+To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder).
Endpoints can then be defined with Resource classes that are `export`ed. This can be done in addition to, or in lieu of, the `@export`ed types in `schema.graphql`. If you are exporting and extending a table you defined in the schema, make sure you remove the `@export` from the schema so that you don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put`, to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it:
+
+```javascript
+// resources.js:
+const { Dog } = tables; // get the Dog table from the Harper-provided set of tables (in the default database)
+
+export class DogWithHumanAge extends Dog {
+  get(query) {
+    this.humanAge = 15 + this.age * 5; // silly calculation of human age equivalent
+    return super.get(query);
+  }
+}
+```
+
+Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, we now have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute, so accessing instance properties like `age` will get the value from the underlying record. The instance holds information about the primary key of the record so updates and actions can be applied to the correct record. And changed or newly assigned properties can be saved or included in the resource as it is returned and serialized. The `return super.get(query)` call at the end allows for any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest#select-properties)).
+
+Often we may want to incorporate data from other tables or data sources in our data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql):
+
+```graphql
+type Breed @table {
+  name: String @primaryKey
+  description: String @indexed
+  lifespan: Int
+  averageWeight: Float
+}
+```
+
+And next we will use this table in our `get()` method. We will call the new table's (static) `get()` method to retrieve a breed by id. To do this correctly, we access the table using our current context by passing in `this` as the second argument. This is important because it ensures that we are accessing the data atomically, in a consistent snapshot across tables. It provides automatic tracking of most-recently-updated timestamps across resources for caching purposes, allows for sharing of contextual metadata (like the user who requested the data), and ensures transactional atomicity for any writes (not needed in this get operation, but important for other operations). The resource methods are automatically wrapped with a transaction (which will commit/finish when the method completes), and this allows us to fully utilize multiple resources in our current transaction.
With our own snapshot of the database for the Dog and Breed tables, we can then access data like this:
+
+```javascript
+// resources.js:
+const { Dog, Breed } = tables; // get the Breed table too
+export class DogWithBreed extends Dog {
+  async get(query) {
+    let breedDescription = await Breed.get(this.breed, this);
+    this.breedDescription = breedDescription;
+    return super.get(query);
+  }
+}
+```
+
+The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified by the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance.
+
+Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While the HTTP PUT method has a specific semantic definition (replace the current record), the POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's post() method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array on a specific record. We might do it like this, specifying an action property so we can differentiate actions:
+
+```javascript
+export class CustomDog extends Dog {
+  async post(data) {
+    if (data.action === 'add-trick')
+      this.tricks.push(data.trick);
+  }
+}
+```
+
+And a POST request to /CustomDog/ would call this `post` method. The Resource class then automatically tracks changes you make to your resource instances and saves those changes when this transaction is committed (again, these methods are automatically wrapped in a transaction and committed once the request handler is finished). So when you push data onto the `tricks` array, this will be recorded and persisted when this method finishes and before sending a response to the client.
+
+The `post` method automatically marks the current instance as being updated. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like the `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction).
+
+We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post` method or `put` method to do this, but we may want to separate the logic so these methods can be called separately without authorization checks. The [Resource API](../../technical-details/reference/resource) defines `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods to easily configure individual capabilities. For example, we might do this:
+
+```javascript
+export class CustomDog extends Dog {
+  allowUpdate(user) {
+    return this.owner === user.username;
+  }
+}
+```
+
+Any methods that are not defined will fall back to Harper's default authorization procedure based on users' roles. If you are using/extending a table, this is based on Harper's [role-based access](../security/users-and-roles). If you are extending the base `Resource` class, the default access requires superuser permission.
+
+You can also use the `default` export to define the root path resource handler. For example:
+
+```javascript
+// resources.js
+export default class CustomDog extends Dog {
+  ...
+}
+```
+
+This will allow requests to a URL like `/` to be directly resolved to this resource.
+
+## Define Custom Data Sources
+
+We can also directly implement the Resource class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like:
+
+```javascript
+const { Breed } = tables; // our Breed table
+class BreedSource extends Resource { // define a data source
+  async get() {
+    return (await fetch(`http://best-dog-site.com/${this.getId()}`)).json();
+  }
+}
+// define that our Breed table is a cache of data from the data source above, with a specified expiration
+Breed.sourcedFrom(BreedSource, { expiration: 3600 });
+```
+
+The [caching documentation](./caching) provides much more information on how to use Harper's powerful caching capabilities and set up data sources.
+
+Harper provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals) and the [Resource interface](../../technical-details/reference/resource).
+
+## Configuring Applications/Components
+
+Every application or component can define its own configuration in a `config.yaml`. If you are using the application template, you will have a [default configuration in this config file](https://github.com/HarperDB/application-template/blob/main/config.yaml) (which is also the default configuration if no config file is provided). Within the config file, you can configure how different files and resources are loaded and handled. The default configuration file itself is documented with directions. Each entry can specify any `files` that the loader will handle, and can also optionally specify what, if any, URL `path`s it will handle. A path of `/` means that the root URLs are handled by the loader, and a path of `.` indicates that the URLs that start with this application's name are handled.
+
+This config file also allows you to define a location for static files (these are delivered directly, as-is, for incoming HTTP requests).
+
+Each configuration entry can have the following properties, in addition to properties that may be specific to the individual component:
+
+* `files`: This specifies the set of files that should be handled by the component. This is a glob pattern, so a set of files can be specified like "directory/**".
+* `path`: This is the URL path that is handled by this component.
+* `root`: This specifies the root directory for mapping file paths to the URLs. For example, if you want all the files in `web/**` to be available in the root URL path via the static handler, you could specify a root of `web`, to indicate that the web directory maps to the root URL path.
+* `package`: This is used to specify that this component is a third-party package, and can be loaded from the specified package reference (which can be an NPM package, GitHub reference, URL, etc.).
+
+## Define Fastify Routes
+
+Exporting resources will generate full RESTful endpoints, but you may prefer to define endpoints through a framework. Harper includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins that provides sophisticated route definition capabilities.
+
+By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which will allow these modules to export a function to define Fastify routes. See the [defining routes documentation](./define-routes) for more information on how to create Fastify routes.
+
+However, Fastify is not as fast as Harper's RESTful endpoints (about 10-20% slower, with more overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so generally Harper's REST interface is recommended for optimum performance and ease of use.
+
+## Restarting Your Instance
+
+Generally, Harper will auto-detect when files change and auto-restart the appropriate threads. However, if there are changes that aren't detected, you may manually restart with the `restart_service` operation:
+
+```json
+{
+  "operation": "restart_service",
+  "service": "http_workers"
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/applications/web-applications.md b/site/versioned_docs/version-4.4/developers/applications/web-applications.md
new file mode 100644
index 00000000..d9892b9a
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/applications/web-applications.md
@@ -0,0 +1,63 @@
+---
+title: Web Applications on Harper
+---
+
+# Web Applications on Harper
+
+Harper is an efficient, capable, and robust platform for developing web applications, with numerous capabilities designed
+specifically for optimized web application delivery. In addition, there are a number of tools and frameworks that can be used
+with Harper to create web applications with standard best-practice design and development patterns. Running these frameworks
+on Harper can unlock tremendous scalability and performance benefits by leveraging Harper's built-in multi-threading,
+caching, and distributed design.
+
+Harper's unique ability to run JavaScript code directly on the server side, combined with its built-in database for data storage, querying, and caching,
+allows you to create full-featured web applications with a single platform. This eliminates the overhead of legacy solutions that
+require separate application servers, databases, and caching layers, and their requisite communication overhead and latency, while
+allowing the full stack to be deployed to distributed locations with full local response handling, providing an incredibly low-latency web experience.
+
+## Web Application Frameworks
+
+With built-in caching mechanisms and an easy-to-use JavaScript API for interacting with data, creating full-featured applications
+using popular frameworks is a simple and straightforward process.
+
+Get started today with one of our examples:
+
+- [Next.js](https://github.com/HarperDB/nextjs-example)
+- [React SSR](https://github.com/HarperDB/react-ssr-example)
+- [Vue SSR](https://github.com/HarperDB/vue-ssr-example)
+- [Svelte SSR](https://github.com/HarperDB/svelte-ssr-example)
+- [Solid SSR](https://github.com/HarperDB/solid-ssr-example)
+
+## Cookie Support
+
+Harper includes support for authenticated sessions using cookies. This allows you to create secure, authenticated web applications
+using best-practice security patterns, allowing users to log in and maintain a session without any credential storage on the client side
+that can be compromised. A login endpoint can be defined by exporting a resource and calling the `login` method on the request object.
For example, this could be a login endpoint in your resources.js file:
+
+```javascript
+export class Login extends Resource {
+  async post(data) {
+    const { username, password } = data;
+    // the resource context is the request object; we assume here that it exposes the login method described above
+    const request = this.getContext();
+    await request.login(username, password);
+    return { message: 'Logged in!' };
+  }
+}
+```
+
+This endpoint can be called from the client side using a standard fetch request; a cookie will be returned, and the session will be maintained by Harper.
+This allows web applications to directly interact with Harper and database resources, without needing to go through extra layers of authentication handling.
+
+## Browser Caching Negotiation
+
+Browsers support caching negotiation with revalidation, which allows requests for locally cached data to be sent to servers with a tag or timestamp. Harper's REST functionality can fully interact with these headers, and return a `304 Not Modified` response based on a prior `ETag` sent in the request headers. It is highly recommended that you utilize the [REST interface](../rest) for accessing tables, as it facilitates this downstream browser caching. Timestamps are recorded with all records and are then returned [as the `ETag` in the response](../rest#cachingconditional-requests). Utilizing this browser caching can greatly reduce the load on your server and improve the performance of your web application by being able to instantly use locally cached data after revalidation from the server.
+
+## Built-in Cross-Origin Resource Sharing (CORS)
+
+Harper includes built-in support for Cross-Origin Resource Sharing (CORS), which allows you to define which domains are allowed to access your Harper instance. This is a critical security feature for web applications, as it prevents unauthorized access to your data from other domains, while allowing cross-domain access from known hosts. You can define the allowed domains in your [Harper configuration file](../../deployments/configuration#http), and Harper will automatically handle the CORS headers for you.
+
+## More Resources
+
+Make sure to check out our developer videos too:
+
+- [Next.js on Harper | Step-by-Step Guide for Next Level Next.js Performance](https://youtu.be/GqLEwteFJYY)
+- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc)
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/developers/clustering/certificate-management.md b/site/versioned_docs/version-4.4/developers/clustering/certificate-management.md
new file mode 100644
index 00000000..11ff0a6c
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/clustering/certificate-management.md
@@ -0,0 +1,70 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+## Development
+
+Out of the box, Harper generates certificates that are used when Harper nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the Harper node, the following settings (see the full [configuration file](../../deployments/configuration) docs for more details) are defaulted and recommended for ease of development:
+
+```
+clustering:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+    insecure: true
+    verify: true
+```
+
+The certificates that Harper generates are stored in your `/keys/` directory.
+
+`insecure` is set to `true` to accept the certificate CN mismatch due to development certificates.
+
+`verify` is set to `true` to enable mutual TLS between the nodes.
+
+## Production
+
+In a production environment, we recommend using your own certificate authority (CA), or a public CA such as Let's Encrypt, to generate certificates for your Harper cluster. This will let you generate certificates with CNs that match the FQDNs of your nodes.
+
+Once you generate new certificates, to make Harper start using them you can either replace the generated files with your own, or update the configuration to point to your new certificates, and then restart Harper.
+
+Since these new certificates can be issued with correct CNs, you should set `insecure` to `false` so that nodes will do full validation of the certificates of the other nodes.
+
+### Certificate Requirements
+
+* Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication`, as these certificates will be used to accept connections from other Harper nodes and to make requests to other Harper nodes. Example:
+
+```
+X509v3 Key Usage: critical
+  Digital Signature, Key Encipherment
+X509v3 Extended Key Usage:
+  TLS Web Server Authentication, TLS Web Client Authentication
+```
+
+* If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file.
+* If your certificates expire, you will need a way to issue new certificates to the nodes and then restart Harper. If you are using a public CA such as Let's Encrypt, a tool like `certbot` can be used to renew certificates.
+
+### Certificate Troubleshooting
+
+If you are having TLS issues with clustering, use the following steps to verify that your certificates are valid.
+
+1. Make sure certificates can be parsed and that you can view the contents:
+
+```
+openssl x509 -in .pem -noout -text
+```
+
+1. Make sure the certificate validates with the CA:
+
+```
+openssl verify -CAfile .pem .pem
+```
+
+1. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match:
+
+```
+openssl rsa -modulus -noout -in .pem | openssl md5
+openssl x509 -modulus -noout -in .pem | openssl md5
+```
diff --git a/site/versioned_docs/version-4.4/developers/clustering/creating-a-cluster-user.md b/site/versioned_docs/version-4.4/developers/clustering/creating-a-cluster-user.md
new file mode 100644
index 00000000..5569ff04
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/clustering/creating-a-cluster-user.md
@@ -0,0 +1,59 @@
+---
+title: Creating a Cluster User
+---
+
+# Creating a Cluster User
+
+Inter-node authentication takes place via Harper users. There is a special role type called `cluster_user` that exists by default and limits the user to only clustering functionality.
+
+A `cluster_user` must be created and added to the `harperdb-config.yaml` file for clustering to be enabled.
+
+All nodes that are intended to be clustered together need to share the same `cluster_user` credentials (i.e. username and password).
+
+There are multiple ways a `cluster_user` can be created; they are:
+
+1.
Through the operations API by calling `add_user`
+
+```json
+{
+  "operation": "add_user",
+  "role": "cluster_user",
+  "username": "cluster_account",
+  "password": "letsCluster123!",
+  "active": true
+}
+```
+
+When using the API to create a cluster user, the `harperdb-config.yaml` file must be updated with the username of the new cluster user.
+
+This can be done through the API by calling `set_configuration` or by editing the `harperdb-config.yaml` file.
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_user": "cluster_account"
+}
+```
+
+In the `harperdb-config.yaml` file, under the top-level `clustering` element, there will be a `user` element. Set this to the name of the cluster user.
+
+```yaml
+clustering:
+  user: cluster_account
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Upon installation using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+_Note: Using command line or environment variables for setting the cluster user only works on install._
+
+```
+harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
+
+1. Upon installation using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+```
+CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.4/developers/clustering/enabling-clustering.md b/site/versioned_docs/version-4.4/developers/clustering/enabling-clustering.md
new file mode 100644
index 00000000..2b80d4e7
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/clustering/enabling-clustering.md
@@ -0,0 +1,49 @@
+---
+title: Enabling Clustering
+---
+
+# Enabling Clustering
+
+Clustering does not run by default; it needs to be enabled.
+
+To enable clustering, the `clustering.enabled` configuration element in the `harperdb-config.yaml` file must be set to `true`.
+
+There are multiple ways to update this element; they are:
+
+1. Directly editing the `harperdb-config.yaml` file and setting enabled to `true`
+
+```yaml
+clustering:
+  enabled: true
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Calling `set_configuration` through the operations API
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_enabled": true
+}
+```
+
+_Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._
+
+1. Using **command line variables**.
+
+```
+harperdb --CLUSTERING_ENABLED true
+```
+
+1. Using **environment variables**.
+
+```
+CLUSTERING_ENABLED=true
+```
+
+An efficient way to **install Harper**, **create the cluster user**, **set the node name** and **enable clustering** in one operation is to combine the steps using command line and/or environment variables. Here is an example using command line variables.
+
+```
+harperdb install --CLUSTERING_ENABLED true --CLUSTERING_NODENAME Node1 --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.4/developers/clustering/establishing-routes.md b/site/versioned_docs/version-4.4/developers/clustering/establishing-routes.md
new file mode 100644
index 00000000..894b9e9f
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/clustering/establishing-routes.md
@@ -0,0 +1,73 @@
+---
+title: Establishing Routes
+---
+
+# Establishing Routes
+
+A route is a connection between two nodes. It is how the clustering network is established.
+
+Routes do not need to cross-connect all nodes in the cluster. You can select a leader node (or a few leaders) that all other nodes connect to, you can chain nodes together, and so on. As long as there is one route connecting a node to the cluster, all other nodes should be able to reach that node.
+
+Using routes, the clustering servers will create a mesh network between nodes. This mesh network ensures that if a node drops out, all other nodes can still communicate with each other. That said, we recommend designing your routing with failover in mind; this means not storing all your routes on one node, but dispersing them throughout the network.
+
+A simple route example is a two-node topology: if Node1 adds a route to connect it to Node2, Node2 does not need to add a route back to Node1. That one route configuration is all that’s needed to establish a bidirectional connection between the nodes.
+
+A route consists of a `port` and a `host`.
+
+`port` - the clustering port of the remote instance you are creating the connection with. This is going to be the `clustering.hubServer.cluster.network.port` in the Harper configuration on the node you are connecting with.
+
+`host` - the host of the remote instance you are creating the connection with. This can be an IP address or a URL.
+
+Routes are set in the `harperdb-config.yaml` file using the `clustering.hubServer.cluster.network.routes` element, which expects an object array, where each object has two properties, `port` and `host`.
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      network:
+        routes:
+          - host: 3.62.184.22
+            port: 9932
+          - host: 3.735.184.8
+            port: 9932
+```
+
+![figure 1](/img/v4.4/clustering/figure1.png)
+
+This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes.
+
+There are multiple ways to set routes; they are:
+
+1. Directly editing the `harperdb-config.yaml` file (refer to the code snippet above).
+1. Calling `cluster_set_routes` through the API.
+
+```json
+{
+  "operation": "cluster_set_routes",
+  "server": "hub",
+  "routes": [ { "host": "3.735.184.8", "port": 9932 } ]
+}
+```
+
+_Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._
+
+1. From the command line.
+
+```bash
+--CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]"
+```
+
+1. Using environment variables.
+
+```bash
+CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}]
+```
+
+The API also has `cluster_get_routes` for getting all routes in the config and `cluster_delete_routes` for deleting routes.
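+
+For example, a `cluster_get_routes` request should need only the operation name (a minimal sketch; check the operations API docs for the exact request and response shape on your version):
+
+```json
+{
+  "operation": "cluster_get_routes"
+}
+```
+
+And `cluster_delete_routes` takes the routes to remove: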
+
+```json
+{
+  "operation": "cluster_delete_routes",
+  "routes": [ { "host": "3.735.184.8", "port": 9932 } ]
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/clustering/index.md b/site/versioned_docs/version-4.4/developers/clustering/index.md
new file mode 100644
index 00000000..14556f3c
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/clustering/index.md
@@ -0,0 +1,31 @@
+---
+title: NATS Clustering
+---
+
+# NATS Clustering
+
+Harper 4.0 - 4.3 used a clustering system based on NATS for replication. In 4.4+, Harper has moved to a new native replication system that has better performance, reliability, and data consistency. This document describes the legacy NATS clustering system. Harper clustering is the process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns.
+
+Harper’s clustering engine replicates data between instances of Harper using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model based on the transaction time recorded on the incoming transaction and the timestamp on the record on the receiving node.
+
+***
+
+### Common Use Case
+
+A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold:
+
+* The edge application should not be making outbound http requests for security purposes.
+* There may not be a reliable network connection.
+* Not all sensor data will be sent to the cloud, either because of the unreliable network connection, or maybe it’s just a pain to store it.
+* The edge node should be inaccessible from outside the firewall.
+* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings.
+
+Harper simplifies the architecture of such an application with its bi-directional, table-level replication:
+
+* The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds.
+* The application continually pushes sensor data into a “sensor\_data” table via the localhost API, comparing it to the threshold values as it does so.
+* When a threshold violation occurs, the application adds a record to the “alerts” table.
+* The application appends to that record’s array the “sensor\_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation.
+* The edge instance publishes the “alerts” table up to the cloud instance.
+
+By letting Harper focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to Harper, you reduce the overall exposure of your application to outside forces.
diff --git a/site/versioned_docs/version-4.4/developers/clustering/managing-subscriptions.md b/site/versioned_docs/version-4.4/developers/clustering/managing-subscriptions.md
new file mode 100644
index 00000000..8d2cafef
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/clustering/managing-subscriptions.md
@@ -0,0 +1,199 @@
+---
+title: Managing subscriptions
+---
+
+Tables are replicated when the table is designated as replicating and there is a subscription between the nodes.
+Tables are designated as replicating by default, but this can be changed by setting `replicate` to `false` in the table definition:
+```graphql
+type Product @table(replicate: false) {
+  id: ID!
+  name: String!
+}
+```
+Or in your harperdb-config.yaml, you can set the default replication behavior for databases, and indicate which databases
+should be replicated by default:
+
+```yaml
+replication:
+  databases: data
+```
+If a table is not in the list of databases to be replicated, it will not be replicated unless the table is specifically set to replicate:
+
+```graphql
+type Product @table(replicate: true) {
+  id: ID!
+  name: String!
+}
+```
+
+The subscription can be set to publish, subscribe, or both.
+
+# Managing subscriptions
+
+Subscriptions can be added, updated, or removed through the API.
+
+_Note: The databases and tables in the subscription must exist on either the local or the remote node. Any databases or tables that do not exist on one particular node (for example, the local node) will be automatically created on that node._
+
+To add a single node and create one or more subscriptions, use `set_node_replication`.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "database": "data",
+      "table": "dog",
+      "publish": false,
+      "subscribe": true
+    },
+    {
+      "database": "data",
+      "table": "chicken",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+This is an example of adding Node2 to your local node. Subscriptions are created for two tables, dog and chicken.
+
+To update one or more subscriptions with a single node, you can also use `set_node_replication`; however, this will behave as a PATCH/upsert, where only the subscription(s) changing will be inserted/updated while the others will be left untouched.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "schema": "dev",
+      "table": "dog",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+This call will update the subscription with the dog table. Any other subscriptions with Node2 will not change.
+
+To add or update subscriptions with one or more nodes in one API call, use `configure_cluster`.
+
+```json
+{
+  "operation": "configure_cluster",
+  "connections": [
+    {
+      "node_name": "Node2",
+      "subscriptions": [
+        {
+          "database": "dev",
+          "table": "chicken",
+          "publish": false,
+          "subscribe": true
+        },
+        {
+          "database": "prod",
+          "table": "dog",
+          "publish": true,
+          "subscribe": true
+        }
+      ]
+    },
+    {
+      "node_name": "Node3",
+      "subscriptions": [
+        {
+          "database": "dev",
+          "table": "chicken",
+          "publish": true,
+          "subscribe": false
+        }
+      ]
+    }
+  ]
+}
+```
+
+_Note: `configure_cluster` will override **any and all** existing subscriptions defined on the local node. This means that before going through the connections in the request and adding the subscriptions, it will first go through **all existing subscriptions the local node has** and remove them.
To get all existing subscriptions use `cluster_status`._ + +#### Start time + +There is an optional property called `start_time` that can be passed in the subscription. This property accepts an ISO formatted UTC date. + +`start_time` can be used to set the time from which you would like to source transactions from a table when creating or updating a subscription. + +```json +{ + "operation": "set_node_replication", + "node_name": "Node2", + "subscriptions": [ + { + "database": "dev", + "table": "dog", + "publish": false, + "subscribe": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +This example will get all transactions on Node2’s dog table starting from `2022-09-02T20:06:35.993Z` and replicate them locally on the dog table. + +If no start time is passed, it defaults to the current time. + +_Note: start time utilizes clustering to source past transactions. For this reason, it can only source transactions that occurred while clustering was enabled._ + +#### Remove node + +To remove a node and all its subscriptions use `remove_node`. + +```json +{ + "operation":"remove_node", + "node_name":"Node2" +} +``` + +#### Cluster status + +To get the status of all connected nodes and see their subscriptions use `cluster_status`. + +```json +{ + "node_name": "Node1", + "is_enabled": true, + "connections": [ + { + "node_name": "Node2", + "status": "open", + "ports": { + "clustering": 9932, + "operations_api": 9925 + }, + "latency_ms": 65, + "uptime": "11m 19s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ], + "system_info": { + "hdb_version": "4.0.0", + "node_version": "16.17.1", + "platform": "linux" + } + } + ] +} +``` diff --git a/site/versioned_docs/version-4.4/developers/clustering/naming-a-node.md b/site/versioned_docs/version-4.4/developers/clustering/naming-a-node.md new file mode 100644 index 00000000..67ac2c49 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/clustering/naming-a-node.md @@ -0,0 +1,45 @@ +--- +title: Naming a Node +--- + +# Naming a Node + +The node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster. + +The name cannot contain any of the following: dot (`.`), comma (`,`), asterisk (`*`), greater-than (`>`), or whitespace. + +The name is set in the `harperdb-config.yaml` file using the `clustering.nodeName` configuration element. + +_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._ + +There are multiple ways to update this element: + +1. Directly editing the `harperdb-config.yaml` file. + +```yaml +clustering: + nodeName: Node1 +``` + +_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._ + +1. Calling `set_configuration` through the operations API + +```json +{ + "operation": "set_configuration", + "clustering_nodeName":"Node1" +} +``` + +1. Using command line variables. + +``` +harperdb --CLUSTERING_NODENAME Node1 +``` + +1. Using environment variables.
+ +``` +CLUSTERING_NODENAME=Node1 +``` diff --git a/site/versioned_docs/version-4.4/developers/clustering/requirements-and-definitions.md b/site/versioned_docs/version-4.4/developers/clustering/requirements-and-definitions.md new file mode 100644 index 00000000..22bc3977 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/clustering/requirements-and-definitions.md @@ -0,0 +1,11 @@ +--- +title: Requirements and Definitions +--- + +# Requirements and Definitions + +To create a cluster, you must have two or more nodes\* (aka instances) of Harper running. + +\*_A node is a single instance/installation of Harper. A node of Harper can operate independently with clustering on or off._ + +On the following pages we'll walk you through the steps required, in order, to set up a Harper cluster. diff --git a/site/versioned_docs/version-4.4/developers/clustering/subscription-overview.md b/site/versioned_docs/version-4.4/developers/clustering/subscription-overview.md new file mode 100644 index 00000000..7b478fdf --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/clustering/subscription-overview.md @@ -0,0 +1,45 @@ +--- +title: Subscription Overview +--- + +# Subscription Overview + +A subscription defines how data should move between two nodes. Subscriptions are exclusively table level and operate independently. A subscription connects a table on one node to a table on another node; it applies to the matching database name and table name on both nodes. + +_Note: these docs often refer to ‘local’ and ‘remote’ nodes. ‘Local’ is the node receiving the API request to create/update a subscription; ‘remote’ is the node referred to in the request, the one on the other end of the subscription._ + +A subscription consists of: + +`database` - the name of the database that the table you are creating the subscription for belongs to. *Note: this was previously referred to as schema and may occasionally still be referenced that way.* + +`table` - the name of the table the subscription will apply to. + +`publish` - a boolean which determines if transactions on the local table should be replicated on the remote table. + +`subscribe` - a boolean which determines if transactions on the remote table should be replicated on the local table. + +#### Publish subscription + +![figure 2](/img/v4.4/clustering/figure2.png) + +This diagram is an example of a `publish` subscription from the perspective of Node1. + +The record with id 2 has been inserted in the dog table on Node1. After that insert completes, it is sent to Node2 and inserted in the dog table there. + +#### Subscribe subscription + +![figure 3](/img/v4.4/clustering/figure3.png) + +This diagram is an example of a `subscribe` subscription from the perspective of Node1. + +The record with id 3 has been inserted in the dog table on Node2. After that insert completes, it is sent to Node1 and inserted there. + +#### Subscribe and Publish + +![figure 4](/img/v4.4/clustering/figure4.png) + +This diagram shows both subscribe and publish, but with publish set to false. You can see that because subscribe is true, the insert on Node2 is replicated on Node1, but because publish is set to false, the insert on Node1 is _**not**_ replicated on Node2. + +![figure 5](/img/v4.4/clustering/figure5.png) + +This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1.
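Putting these pieces together, a single subscription object, as it appears in operations such as `set_node_replication` (covered in Managing subscriptions), looks like this; the database and table names are illustrative:

```json
{
  "database": "dev",
  "table": "dog",
  "publish": true,
  "subscribe": true
}
```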
diff --git a/site/versioned_docs/version-4.4/developers/clustering/things-worth-knowing.md b/site/versioned_docs/version-4.4/developers/clustering/things-worth-knowing.md new file mode 100644 index 00000000..f378dfd9 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/clustering/things-worth-knowing.md @@ -0,0 +1,43 @@ +--- +title: Things Worth Knowing +--- + +# Things Worth Knowing + +Additional information that will help you define your clustering topology. + +*** + +### Transactions + +Transactions that are replicated across the cluster are: + +* Insert +* Update +* Upsert +* Delete +* Bulk loads + * CSV data load + * CSV file load + * CSV URL load + * Import from S3 + +When adding or updating a node, any databases and tables in the subscription that don’t exist on the remote node will be automatically created. + +**Destructive database operations do not replicate across a cluster**. Those operations include `drop_database`, `drop_table`, and `drop_attribute`. If the desired outcome is to drop database information from any nodes, then the operation(s) will need to be run on each node independently. + +Users and roles are not replicated across the cluster. + +*** + +### Queueing + +Harper has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catchup routine is executed to ensure data that was missed, specific to the subscription, is sent/received as defined. + +*** + +### Topologies + +Harper clustering creates a mesh network between nodes, giving end users the ability to create an unlimited number of topologies. Subscription topologies can be as simple or as complex as needed. + +![](/img/v4.4/clustering/figure6.png) diff --git a/site/versioned_docs/version-4.4/developers/components/built-in.md b/site/versioned_docs/version-4.4/developers/components/built-in.md new file mode 100644 index 00000000..81d4ddbf --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/components/built-in.md @@ -0,0 +1,117 @@ +--- +title: Built-In Components +--- + +# Built-In Components + +Harper provides extended features using built-in components. They do **not** need to be installed with a package manager; they simply must be specified in a config to run. These are used throughout many Harper docs, guides, and examples. Unlike external components, which have their own semantic versions, built-in components follow Harper's semantic version. + +* [Built-In Components](./built-in#built-in-components) + * [fastifyRoutes](./built-in#fastifyroutes) + * [graphql](./built-in#graphql) + * [graphqlSchema](./built-in#graphqlschema) + * [jsResource](./built-in#jsresource) + * [rest](./built-in#rest) + * [roles](./built-in#roles) + * [static](./built-in#static) + +## fastifyRoutes + +Specify custom endpoints using [Fastify](https://fastify.dev/). + +This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
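For a quick sense of what such a route file contains, here is a minimal sketch; the file name, route, `Dog` table, and exact handler signature are assumptions, so treat the guide linked below as authoritative:

```js
// routes/dogs.js - a hedged sketch of a fastifyRoutes route file
export default async function (server) {
	// `server` is the Fastify instance provided by Harper;
	// `tables` is Harper's global API for accessing table resources
	server.route({
		method: 'GET',
		url: '/dogs/:id',
		handler: async (request) => tables.Dog.get(request.params.id),
	});
}
```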
+ +Complete documentation for this feature is available here: [Define Fastify Routes](../applications/define-routes) + +```yaml +fastifyRoutes: + files: './routes/*.js' +``` + +## graphql + +> GraphQL querying provides functionality for mapping GraphQL queries to exported resources, and is based on the [GraphQL Over HTTP / GraphQL specifications](https://graphql.github.io/graphql-over-http/draft/#) (it is designed to intuitively map queries to Harper resources, but does not implement the full [specification](https://spec.graphql.org/) of resolvers, subscribers, and mutations). + +Enables GraphQL querying via a `/graphql` endpoint loosely implementing the GraphQL Over HTTP specification. + +Complete documentation for this feature is available here: [GraphQL](../../technical-details/reference/graphql) + +```yaml +graphql: true +``` + +## graphqlSchema + +Specify schemas for Harper tables and resources via GraphQL schema syntax. + +This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options. + +Complete documentation for this feature is available here: [Defining Schemas](../applications/defining-schemas) + +```yaml +graphqlSchema: + files: './schemas.graphql' +``` + +## jsResource + +Specify custom, JavaScript-based Harper resources. + +Refer to the Application [Custom Functionality with JavaScript](../applications/#custom-functionality-with-javascript) guide, or the [Resource Class](../../technical-details/reference/resource) reference documentation, for more information on custom resources. + +This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options. + +```yaml +jsResource: + files: './resource.js' +``` + +## rest + +Enable automatic REST endpoint generation for exported resources with this component. + +Complete documentation for this feature is available here: [REST](../rest) + +```yaml +rest: true +``` + +This component contains additional options: + +To enable `Last-Modified` header support: + +```yaml +rest: + lastModified: true +``` + +To disable automatic WebSocket support: + +```yaml +rest: + webSocket: false +``` + +## roles + +Specify roles for Harper tables and resources. + +This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options. + +Complete documentation for this feature is available here: [Defining Roles](../applications/defining-roles) + +```yaml +roles: + files: './roles.yaml' +``` + +## static + +Specify which files to serve statically from the Harper HTTP endpoint. Built using the [send](https://www.npmjs.com/package/send) and [serve-static](https://www.npmjs.com/package/serve-static) modules. + +This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+ +```yaml +static: + files: './web/*' +``` diff --git a/site/versioned_docs/version-4.4/developers/components/index.md b/site/versioned_docs/version-4.4/developers/components/index.md new file mode 100644 index 00000000..14b3cde8 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/components/index.md @@ -0,0 +1,25 @@ +--- +title: Components +--- + +# Components + +Harper components are a core Harper concept: flexible, JavaScript-based _extensions_ of the highly extensible core Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](../../technical-details/reference/globals) (such as `Resource`, `databases`, and `tables`). + +A key aspect of components is their extensibility; components can be built on other components. For example, a [Harper Application](../applications/) is a component that uses many other components. The [application template](https://github.com/HarperDB/application-template) demonstrates many of Harper's built-in components such as `rest` (for automatic REST endpoint generation), `graphqlSchema` (for table schema definitions), and many more. + +From management to development, the following pages document everything a developer needs to know about Harper components. + +- [Managing Components](./managing) - developing, installing, deploying, and executing Harper components locally and remotely +- [Technical Reference](./reference) - detailed, technical reference for component development +- [Built-In Components](./built-in) - documentation for all of Harper's built-in components (i.e. `rest`) + +## Custom Components + +The following list covers all of Harper's officially maintained custom components. They are all available on npm and GitHub. + +- [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs) +- [`@harperdb/apollo`](https://github.com/HarperDB/apollo) +- [`@harperdb/status-check`](https://github.com/HarperDB/status-check) +- [`@harperdb/prometheus-exporter`](https://github.com/HarperDB/prometheus-exporter) +- [`@harperdb/acl-connect`](https://github.com/HarperDB/acl-connect) \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/developers/components/managing.md b/site/versioned_docs/version-4.4/developers/components/managing.md new file mode 100644 index 00000000..31155ed3 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/components/managing.md @@ -0,0 +1,179 @@ +--- +title: Managing +--- + +# Managing + +Harper offers several approaches to managing components that differ between local development and Harper-managed instances. This page covers the recommended methods of developing, installing, deploying, and running Harper components. + +## Local Development + +Harper is designed to be simple to run locally. Generally, Harper should be installed locally on a machine using a global package manager install (i.e. `npm i -g harperdb`). + +> Before continuing, ensure Harper is installed and the `harperdb` CLI is available. For more information, review the [installation guide](../../deployments/install-harper/). + +When developing a component locally, there are a number of ways to run it on Harper. + +### `dev` and `run` commands + +The quickest way to run a component is by using the `dev` command within the component directory. + +The `harperdb dev .` command will automatically watch for file changes within the component directory and restart the Harper threads when changes are detected.
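For example, assuming the component lives in a directory named `my-component` (a hypothetical path):

```sh
cd my-component
harperdb dev .   # watches for file changes and restarts Harper threads
```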
+ +The `dev` command will **not** restart the main thread; if this is a requirement, switch to using `run` instead and manually start/stop the process to execute the main thread. + +Stop execution for either of these processes by sending a SIGINT (generally CTRL/CMD+C) signal to the process. + +### Deploying to a local Harper instance + +Alternatively, to mimic interfacing with a hosted Harper instance, use operation commands instead. + +1. Start up Harper with `harperdb` +1. _Deploy_ the component to the local instance by executing: + + ```sh + harperdb deploy_component \ + project=<project-name> \ + package=<path-to-component> \ + restart=true + ``` + + * Make sure to omit the `target` option so that it _deploys_ to the Harper instance running locally + * The `package=<path-to-component>` option creates a symlink to the component, simplifying restarts + * By default, the `deploy_component` operation command will _deploy_ the current directory by packaging it up and streaming the bytes. By specifying `package`, it skips this and references the file path directly + * The `restart=true` option automatically restarts Harper threads after the component is deployed + * If set to `'rolling'`, a rolling restart will be triggered after the component is deployed +1. In another terminal, use the `harperdb restart` command to restart the instance's threads at any time + * With `package=<path-to-component>`, the component source is symlinked, so changes will automatically be picked up between restarts + * If `package` was omitted, run the `deploy_component` command again with any new changes +1. To remove the component use `harperdb drop_component project=<project-name>` + +Similar to the previous section, if the main thread needs to be restarted, start and stop the Harper instance manually (with the component deployed). Upon Harper startup, the component will automatically be loaded and executed across all threads. + +> Not all [component operations](../operations-api/components) are available via CLI. When in doubt, switch to using the Operations API via network requests to the local Harper instance. + +For example, to properly _deploy_ a `test-component` locally, the command would look like: + +```sh +harperdb deploy_component \ + project=test-component \ + package=/Users/dev/test-component \ + restart=true +``` + +> If the current directory is the component directory, use a shortcut such as `package=$(pwd)` to avoid typing out the complete path. + +## Remote Management + +Managing components on a remote Harper instance is best accomplished through [component operations](../operations-api/components), similar to using the `deploy_component` command locally. Before continuing, always back up critical Harper instances. Managing, deploying, and executing components can directly impact a live system. + +Remote Harper instances work very similarly to local Harper instances. The primary component management operations still include `deploy_component`, `drop_component`, and `restart`. + +The key to remote management is specifying a remote `target` along with appropriate username/password values. These can all be specified using CLI arguments: `target`, `username`, and `password`. Alternatively, the `CLI_TARGET_USERNAME` and `CLI_TARGET_PASSWORD` environment variables can replace the `username` and `password` arguments.
+ +All together: + +```sh +harperdb deploy_component \ + project=<project-name> \ + package=<package-specifier> \ + username=<username> \ + password=<password> \ + target=<instance-url> \ + restart=true \ + replicated=true +``` + +Or, using environment variables: + +```sh +export CLI_TARGET_USERNAME=<username> +export CLI_TARGET_PASSWORD=<password> +harperdb deploy_component \ + project=<project-name> \ + package=<package-specifier> \ + target=<instance-url> \ + restart=true \ + replicated=true +``` + +Unlike local development, where `package` should be set to a local file path for symlinking and an improved development experience, remote deployment offers some additional options. + +A local component can be deployed to a remote instance by **omitting** the `package` field. Harper will automatically package the local directory and include that along with the rest of the deployment operation. + +Furthermore, the `package` field can be set to any valid [npm dependency value](https://docs.npmjs.com/cli/v11/configuring-npm/package-json#dependencies). + +* For components deployed to npm, specify the package name: `package="@harperdb/status-check"` +* For components on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check` +* Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-component.git"` + * Reference the [SSH Key](../operations-api/components#add-ssh-key) operations for more information on managing SSH keys on a remote instance +* Even tarball URLs are supported: `package="https://example.com/component.tar.gz"` + +> When using git tags, we highly recommend that you use the semver directive to ensure consistent and reliable installation by npm. In addition to tags, you can also reference branches or commit hashes. + +These `package` values are all supported because, behind the scenes, Harper generates a `package.json` file for the components. Then, it uses a form of `npm install` to resolve them as dependencies. This is why symlinks are generated when specifying a file path locally. The following [Advanced](./managing#advanced) section explores this pattern in more detail. + +Finally, don't forget to include `restart=true`, or run `harperdb restart target=<instance-url>`. + +## Advanced + +The following methods are advanced and should be executed with caution as they can have unintended side-effects. Always back up any critical Harper instances before continuing. + +First, locate the Harper installation `rootPath` directory. Generally, this is `~/hdb`. It can be retrieved by running `harperdb get_configuration` and looking for the `rootPath` field. + +> For a useful shortcut on POSIX compliant machines run: `harperdb get_configuration json=true | jq ".rootPath" | sed 's/"//g'` + +This path is the Harper instance. Within this directory, locate the root config titled `harperdb-config.yaml`, and the components root path. The components root path will be `<rootPath>/components` by default (thus, `~/hdb/components`), but it can also be configured. If necessary, use `harperdb get_configuration` again and look for the `componentsRoot` field for the exact path. + +### Adding components to root + +Similar to how components can specify other components within their `config.yaml`, components can be added to Harper by adding them to the `harperdb-config.yaml`. + +The configuration is very similar to that of `config.yaml`. Entries are comprised of a top-level `<name>:` key and an indented `package: <specifier>` field. Any additional component options can also be included as indented fields.
+ +```yaml +status-check: + package: "@harperdb/status-check" +``` + +The key difference between this and a component's `config.yaml` is that the name does **not** need to be associated with a `package.json` dependency. When Harper starts up, it transforms these configurations into a `package.json` file, and then executes a form of `npm install`. Thus, the `package: <specifier>` field can use any valid dependency syntax; npm packages, GitHub repos, tarballs, and local directories are all supported. + +Given a root config like: + +```yaml +myGithubComponent: + package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub +myNPMComponent: + package: harperdb # install from npm +myTarBall: + package: /Users/harper/cool-component.tar # install from tarball +myLocal: + package: /Users/harper/local # install from local path +myWebsite: + package: https://harperdb-component # install from URL +``` + +Harper will generate a `package.json` like: + +```json +{ + "dependencies": { + "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0", + "myNPMComponent": "npm:harperdb", + "myTarBall": "file:/Users/harper/cool-component.tar", + "myLocal": "file:/Users/harper/local", + "myWebsite": "https://harperdb-component" + } +} +``` + +npm will install all the components and store them in `<componentsRoot>`. A symlink back to `<componentsRoot>/node_modules` is also created for dependency resolution purposes. + +The package prefix is automatically added; however, you can manually set it in your package reference. + +```yaml +myCoolComponent: + package: file:/Users/harper/cool-component.tar +``` + +By specifying a file path, npm will generate a symlink, and changes will be automatically picked up between restarts. diff --git a/site/versioned_docs/version-4.4/developers/components/reference.md b/site/versioned_docs/version-4.4/developers/components/reference.md new file mode 100644 index 00000000..22d55063 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/components/reference.md @@ -0,0 +1,251 @@ +--- +title: Component Reference +--- + +# Component Reference + +The technical definition of a Harper component is fairly loose. In the absolute simplest form, a component is any JavaScript module that is compatible with the [default component configuration](#default-component-configuration). For example, a module with a singular `resources.js` file is technically a valid component. + +Harper provides many features as _built-in components_; these can be used directly without installing any other dependencies. + +Other features are provided by _custom components_. These can be npm packages such as [@harperdb/nextjs](https://github.com/HarperDB/nextjs) and [@harperdb/apollo](https://github.com/HarperDB/apollo) (which are maintained by Harper), or something maintained by the community. Custom components follow the same configuration rules and use the same APIs that Harper's built-in components do. The only difference is that they must be a part of the component's dependencies. + +> Documentation is available for all [built-in](./built-in) and [custom](./#custom-components) Harper components. + + + +## Component Configuration + +Harper components are configured with a `config.yaml` file located in the root of the component module directory. This file is how a component configures other components it depends on. Each entry in the file starts with a component name, and then configuration values are indented below it. + +```yaml +name: + option-1: value + option-2: value +``` + +It is the entry's `name` that is used for component resolution.
It can be one of the [built-in components](./built-in), or it must match a package dependency of the component as specified by `package.json`. The [Custom Component Configuration](#custom-component-configuration) section provides more details and examples. + +Some built-in components can be configured with as little as a top-level boolean; for example, the [rest](./built-in#rest) extension can be enabled with just: + +```yaml +rest: true +``` + +Other components (built-in or custom) will generally have more configuration options. Some options are ubiquitous to the Harper platform, such as the `files`, `path`, and `root` options for a [Resource Extension](#resource-extension-configuration), or `package` for a [custom component](#custom-component-configuration). Additionally, [custom options](#protocol-extension-configuration) can be defined for [Protocol Extensions](#protocol-extension). + +### Custom Component Configuration + +Any custom component **must** be configured with the `package` option in order for Harper to load that component. When enabled, the name of the package must match a dependency of the component. For example, to use the `@harperdb/nextjs` extension, it must first be included in `package.json`: + +```json +{ + "dependencies": { + "@harperdb/nextjs": "^1.0.0" + } +} +``` + +Then, within `config.yaml` it can be enabled and configured using: + +```yaml +'@harperdb/nextjs': + package: '@harperdb/nextjs' + # ... +``` + +Since npm allows for a [variety of dependency configurations](https://docs.npmjs.com/cli/configuring-npm/package-json#dependencies), this can be used to create custom references. For example, to depend on a specific GitHub branch, first update the `package.json`: + +```json +{ + "dependencies": { + "harper-nextjs-test-feature": "HarperDB/nextjs#test-feature" + } +} +``` + +And now in `config.yaml`: + +```yaml +harper-nextjs-test-feature: + package: '@harperdb/nextjs' + files: '/*' + # ... +``` + +### Default Component Configuration + +Harper components do not need to specify a `config.yaml`. Harper uses the following default configuration to load components. + +```yaml +rest: true +graphql: true +graphqlSchema: + files: '*.graphql' +roles: + files: 'roles.yaml' +jsResource: + files: 'resources.js' +fastifyRoutes: + files: 'routes/*.js' + path: '.' +static: + files: 'web/**' +``` + +Refer to the [built-in components](./built-in) documentation for more information on these fields. + +If a `config.yaml` is defined, it will **not** be merged with the default config. + +## Extensions + +A Harper Extension is an extensible component that is intended to be used by other components. The built-in components [graphqlSchema](./built-in#graphqlschema) and [jsResource](./built-in#jsresource) are both examples of extensions. + +There are two key types of Harper Extensions: **Resource Extensions** and **Protocol Extensions**. The key difference is that a **Protocol Extension** can return a **Resource Extension**. + +Functionally, what makes an extension a component is the contents of `config.yaml`. Unlike the Application Template referenced earlier, which specified multiple components within the `config.yaml`, an extension will specify an `extensionModule` option. + +- **extensionModule** - `string` - _required_ - A path to the extension module source code. The path must resolve from the root of the extension module directory. + +For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) `config.yaml` specifies `extensionModule: ./extension.js`.
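Putting that together, a minimal extension `config.yaml` could look like the following; the path is illustrative:

```yaml
# the extension's entry point, resolved from the module root
extensionModule: ./extension.js
```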
+ +If the extension is being written in something other than JavaScript (such as TypeScript), ensure that the path resolves to the built version (i.e. `extensionModule: ./dist/index.js`). + +It is also recommended that all extensions have a `package.json` that specifies JavaScript package metadata such as name, version, type, etc. Since extensions are just JavaScript packages, they can do anything a JavaScript package can normally do. An extension can be written in TypeScript and compiled to JavaScript. It can export an executable (using the [bin](https://docs.npmjs.com/cli/configuring-npm/package-json#bin) property). It can be published to npm. The possibilities are endless! + +Furthermore, what defines an extension separately from a component is that it leverages any of the [Resource Extension](#resource-extension-api) or [Protocol Extension](#protocol-extension-api) APIs. The key is in the name: **extensions are extensible**. + +### Resource Extension + +A Resource Extension is for processing a certain type of file or directory. For example, the built-in [jsResource](./built-in#jsresource) extension handles executing JavaScript files. + +Resource Extensions are comprised of four distinct function exports, [`handleFile()`](#handlefilecontents-urlpath-path-resources-void--promisevoid), [`handleDirectory()`](#handledirectoryurlpath-path-resources-boolean--void--promiseboolean--void), [`setupFile()`](#setupfilecontents-urlpath-path-resources-void--promisevoid), and [`setupDirectory()`](#setupdirectoryurlpath-path-resources-boolean--void--promiseboolean--void). The `handleFile()` and `handleDirectory()` methods are executed on **all worker threads**, and are _executed again during restarts_. The `setupFile()` and `setupDirectory()` methods are only executed **once** on the **main thread** during the initial system start sequence. + +> Keep in mind that the CLI command `harperdb restart` or CLI argument `restart=true` only restarts the worker threads. If a component is deployed using `harperdb deploy`, the code within the `setupFile()` and `setupDirectory()` methods will not be executed until the system is completely shut down and turned back on. + +Other than their execution behavior, the `handleFile()` and `setupFile()` methods, and the `handleDirectory()` and `setupDirectory()` methods, have identical function definitions (arguments and return value behavior). + +#### Resource Extension Configuration + +Any [Resource Extension](#resource-extension) can be configured with the `files`, `path`, and `root` options. These options control how _files_ and _directories_ are resolved in order to be passed to the extension's `handleFile()`, `setupFile()`, `handleDirectory()`, and `setupDirectory()` methods. + +- **files** - `string` - *required* - Specifies the set of files and directories that should be handled by the component. Can be a glob pattern. +- **path** - `string` - *optional* - Specifies the URL path to be handled by the component. +- **root** - `string` - *optional* - Specifies the root directory for mapping file paths to URLs.
+ +For example, to configure the [static](./built-in#static) component to serve all files from `web` at the root URL path: + +```yaml +static: + files: 'web/**' + root: 'web' +``` + +Or, to configure the [graphqlSchema](./built-in#graphqlschema) component to load all schemas within the `src/schema` directory: + +```yaml +graphqlSchema: + files: 'src/schema/*.schema' +``` + +#### Resource Extension API + +In order for an extension to be classified as a Resource Extension it must implement at least one of the `handleFile()`, `handleDirectory()`, `setupFile()`, or `setupDirectory()` methods. As a standalone extension, these methods should be named and exported directly. For example: + +```js +// ESM +export function handleFile() {} +export function setupDirectory() {} + +// or CJS +function handleDirectory() {} +function setupFile() {} + +module.exports = { handleDirectory, setupFile } +``` + +When returned by a [Protocol Extension](#protocol-extension), these methods should be defined on the object instead: + +```js +export function start() { + return { + handleFile () {} + } +} +``` + +##### `handleFile(contents, urlPath, path, resources): void | Promise<void>` +##### `setupFile(contents, urlPath, path, resources): void | Promise<void>` + +These methods are for processing individual files. They can be async. + +> Remember! +> +> `setupFile()` is executed **once** on the **main thread** during the main start sequence. +> +> `handleFile()` is executed on **worker threads** and is executed again during restarts. + +Parameters: + +- **contents** - `Buffer` - The contents of the file +- **urlPath** - `string` - The recommended URL path of the file +- **path** - `string` - The relative path of the file + +- **resources** - `Object` - A collection of the currently loaded resources + +Returns: `void | Promise<void>` + +##### `handleDirectory(urlPath, path, resources): boolean | void | Promise<boolean | void>` +##### `setupDirectory(urlPath, path, resources): boolean | void | Promise<boolean | void>` + +These methods are for processing directories. They can be async. + +If the function returns or resolves a truthy value, then the component loading sequence will end and no other entries within the directory will be processed. + +> Remember! +> +> `setupDirectory()` is executed **once** on the **main thread** during the main start sequence. +> +> `handleDirectory()` is executed on **worker threads** and is executed again during restarts. + +Parameters: + +- **urlPath** - `string` - The recommended URL path of the file +- **path** - `string` - The relative path of the directory + +- **resources** - `Object` - A collection of the currently loaded resources + +Returns: `boolean | void | Promise<boolean | void>` + +### Protocol Extension + +A Protocol Extension is a more advanced form of a Resource Extension and is mainly used for implementing higher level protocols. For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) handles building and running a Next.js project. A Protocol Extension is particularly useful for adding custom networking handlers (see the [`server`](../../technical-details/reference/globals#server) global API documentation for more information). + +#### Protocol Extension Configuration + +In addition to the `files`, `path`, and `root` [Resource Extension configuration](#resource-extension-configuration) options, and the `package` [Custom Component configuration](#custom-component-configuration) option, Protocol Extensions can also specify additional configuration options.
Any options added to the extension configuration (in `config.yaml`) will be passed through to the `options` object of the `start()` and `startOnMainThread()` methods. + +For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs#options) specifies multiple options that can be included in its configuration. A Next.js app using `@harperdb/nextjs` may specify the following `config.yaml`: + +```yaml +'@harperdb/nextjs': + package: '@harperdb/nextjs' + files: '/*' + prebuilt: true + dev: false +``` + +Many protocol extensions will use the `port` and `securePort` options for configuring networking handlers. Many of the [`server`](../../technical-details/reference/globals#server) global APIs accept `port` and `securePort` options, so components replicate this pattern for simpler pass-through. + +#### Protocol Extension API + +A Protocol Extension is made up of two distinct methods, [`start()`](#startoptions-resourceextension--promiseresourceextension) and [`startOnMainThread()`](#startonmainthreadoptions-resourceextension--promiseresourceextension). Similar to a Resource Extension, the `start()` method is executed on _all worker threads_, and _executed again on restarts_. The `startOnMainThread()` method is **only** executed **once** during the initial system start sequence. These methods have an identical `options` object parameter, and can both return a Resource Extension (i.e. an object containing one or more of the methods listed above). + +##### `start(options): ResourceExtension | Promise<ResourceExtension>` +##### `startOnMainThread(options): ResourceExtension | Promise<ResourceExtension>` + +Parameters: + +- **options** - `Object` - An object representation of the extension's configuration options. + +Returns: `Object` - An object that implements any of the [Resource Extension APIs](#resource-extension-api) diff --git a/site/versioned_docs/version-4.4/developers/miscellaneous/google-data-studio.md b/site/versioned_docs/version-4.4/developers/miscellaneous/google-data-studio.md new file mode 100644 index 00000000..47fd80bd --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/miscellaneous/google-data-studio.md @@ -0,0 +1,37 @@ +--- +title: Google Data Studio +--- + +# Google Data Studio + +[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The Harper Google Data Studio connector seamlessly integrates your Harper data with Google Data Studio so you can build custom, real-time data visualizations. + +The Harper Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/). + +## Requirements + +The Harper database must be accessible through the Internet in order for Google Data Studio servers to access it. The database may be hosted by you or via [Harper Cloud](../../deployments/harper-cloud/). + +## Get Started + +Get started by selecting the Harper connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create). + +1. Log in to https://datastudio.google.com/. +1. Add a new Data Source using the Harper connector. The current release version can be added as a data source by following this link: [Harper Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60_VJeO3BhHJPvdd20-S76g). +1.
Authorize the connector to access other servers on your behalf (this allows the connector to contact your database). +1. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it. +1. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer. +1. Check the box for “Allow Bad Certs” if your Harper instance does not have a valid SSL certificate. [Harper Cloud](../../deployments/harper-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this if you are using self-signed certs. If you are using [Harper Cloud](../../deployments/harper-cloud/) or another instance you know should always have valid SSL certificates, do not check this box. +1. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button. + * Table will ask you for a Schema and a Table, and will return all fields of that table using `SELECT *`. + * SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together and use Harper-specific SQL functions, along with the full power that standard SQL grants. +1. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue. +1. If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as creating new fields from the report view that do calculations on other fields. + +## Considerations + +* Both Postman and the [Harper Studio](../../deployments/harper-cloud/) app have ways to convert a user:password pair to a Basic Auth token (see the shell sketch below). Use either to create the token for the connector’s user. + * You may sign out of your current user by going to the instances tab in Harper Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab. +* It’s highly recommended that you create a read-only user role in Harper Studio, and create a user with that role for your data sources to use. This prevents the authorization token from being used to alter your database, should someone else ever get hold of it. +* The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set. +* _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake\_case gets around this.
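For reference, a Basic Auth token is simply the Base64 encoding of the `user:password` pair, so it can also be generated from a shell; the credentials here are placeholders:

```sh
# prints the token to paste into the connector's Basic Auth field
echo -n 'data_studio_user:password' | base64
```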
diff --git a/site/versioned_docs/version-4.4/developers/miscellaneous/index.md b/site/versioned_docs/version-4.4/developers/miscellaneous/index.md new file mode 100644 index 00000000..13ee450a --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/miscellaneous/index.md @@ -0,0 +1,7 @@ +--- +title: Miscellaneous +--- + +# Miscellaneous + +This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/developers/miscellaneous/query-optimization.md b/site/versioned_docs/version-4.4/developers/miscellaneous/query-optimization.md new file mode 100644 index 00000000..4a2dbc6c --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/miscellaneous/query-optimization.md @@ -0,0 +1,37 @@ +--- +title: Query Optimization +--- + +# Query Optimization + +Harper has powerful query functionality with excellent performance characteristics. However, like any database, different queries can vary significantly in performance. It is important to understand how querying works to help you optimize your queries for the best performance. + +### Query Execution + +At a fundamental level, querying involves defining conditions to find matching data, then executing those conditions against the database and delivering the results based on required fields, relationships, and ordering. Harper supports indexed fields, and these indexes are used to speed up query execution. When conditions are specified in a query, Harper will attempt to utilize indexes to optimize the speed of query execution. When a query specifies a condition on a field that is not indexed, the database must check each potential record to determine if it matches the condition. + +When a query is performed with multiple conditions, Harper will attempt to optimize the ordering of these conditions. When using intersecting conditions (the default `and` operator, where matching records must satisfy all conditions), Harper will attempt to apply the most selective and performant condition first. This means that if one condition can use an index and is more selective than another, it will be used first to find the initial matching set of data, which is then filtered based on the remaining conditions. A selective condition on an indexed field will be used before conditions that aren't indexed or are less selective. The `search` method includes an `explain` flag that can be used to return the query execution order, to understand how the query is being executed. This can be useful for debugging and optimizing queries. + +For a union query, each condition is executed separately and the results are combined/merged. + +### Condition, Operators, and Indexing + +When a query is performed, the conditions specified in the query are evaluated against the data in the database. The conditions can be simple or complex, and can include scalar operators such as `=`, `!=`, `>`, `<`, `>=`, `<=`, as well as `starts_with`, `contains`, and `ends_with`. The use of these operators can affect the performance of the query, especially when used with indexed fields. If an indexed field is not used, the database will have to check each potential record to determine if it matches the condition.
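To make this concrete, here is a hedged sketch of a two-condition query using the `explain` flag mentioned above; the `Product` table, attribute names, and exact condition syntax are assumptions, so consult the Resource reference for the authoritative API:

```js
// With an indexed `category` and an unindexed `description`, Harper would be
// expected to apply the indexed, selective condition first, then filter the
// candidate records with the `contains` condition.
const plan = await tables.Product.search({
	conditions: [
		{ attribute: 'category', comparator: 'equals', value: 'books' }, // indexed
		{ attribute: 'description', comparator: 'contains', value: 'rare' }, // not indexed
	],
	explain: true, // return the planned condition execution order
});
```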
If the only condition is not indexed, or there are no conditions on an indexed field, the database will have to check every record with a full table scan, which can be very slow for large datasets (and gets slower as the dataset grows, `O(n)`). + +The use of indexed fields can significantly improve the performance of a query, providing fast performance even as the database grows in size (`O(log n)`). However, indexed fields require extra writes to the database when performing insert, update, or delete operations. This is because the index must be updated to reflect the changes in the data. This can slow down write operations, but the trade-off is often worth it if the field is frequently used in queries. + +The different operators can also affect the performance of a query. For example, using the `=` operator on an indexed field is generally faster than using the `!=` operator, as the latter requires checking all records that do not match the condition. An index is a sorted list of values, so the greater-than and less-than operators will also utilize indexed fields when possible. If the range is narrow, these operations can be very fast. A wide range could yield a large number of records and will naturally incur more overhead. The `starts_with` operator can also leverage indexed fields because it can quickly find the matching entries in the sorted index. On the other hand, the `contains`, `ends_with`, and not-equal (`!=` or `not_equal`) operators cannot leverage indexes, so they will require a full table scan to find the matching records if they are not used in conjunction with a selective, indexed condition. There is a special case of `!= null`, which can use indexes to find non-null records; however, this is generally only helpful for sparse fields where only a small subset of records have non-null values. More generally, operators are more efficient if they are selecting on fields with high cardinality. + +Conditions can be applied to primary key fields or other indexed fields (known as secondary indexes). In general, querying on a primary key will be faster than querying on a secondary index, as the primary key is the most efficient way to access data in the database and doesn't require cross-referencing to the main records. + +### Relationships/Joins + +Harper supports relationships between tables, allowing for "join" queries. These result in more complex queries with potentially larger performance overhead, as more lookups are necessary to connect matched or selected data with other tables. Similar principles apply to conditions that use relationships: indexed fields and comparators that leverage the ordering are still valuable for performance. If a condition on one table is connected to another table through a foreign key, it is important that the foreign key also be indexed. Likewise, if a query `select`s data from a related table through a foreign key, that foreign key should be indexed. The same principle of higher cardinality applies here as well: more unique values allow for more efficient lookups. + + +### Sorting +Queries can also specify a sort order. This can also significantly impact performance. If a query specifies a sort order on an indexed field, the database can use the index to quickly retrieve the data in the specified order. A sort order used in conjunction with a condition on the same (indexed) field can utilize the index for ordering.
However, if the sort order is not on an indexed field, or the query specifies conditions on different fields, Harper will generally need to sort the data after retrieving it, which can be slow for large datasets. The same principles apply to sorting as they do to conditions: sorting on a primary key is generally faster than sorting on a secondary index, provided the condition aligns with the sort order. + +### Streaming + +One of the unique and powerful features of Harper's querying functionality is the ability to stream query results. When possible, Harper can return records from a query as they are found, rather than waiting for the entire query to complete. This can significantly improve performance for large queries, as it allows the application to start processing results or sending the initial data before the entire query is complete (improving time-to-first-byte speed, for example). However, using a sort order on a query with conditions that are not on an aligned index requires that the entire query result be loaded in order to perform the sorting, which defeats the streaming benefits. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/developers/miscellaneous/sdks.md b/site/versioned_docs/version-4.4/developers/miscellaneous/sdks.md new file mode 100644 index 00000000..13998f80 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/miscellaneous/sdks.md @@ -0,0 +1,22 @@ +--- +title: SDKs +description: >- + Software Development Kits available for connecting to Harper from different + languages. +--- + +# SDKs + +| SDK/Tool | Description | Installation | +| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------- | +| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A .NET Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | +| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A JavaScript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | +| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use Harper as the data source for a Gatsby project at build time | `npm i -s gatsby-source-harperdb` | +| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The Harper EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` | +| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of Harper API functions with wrappers for an object-oriented interface | `pip3 install harperdb` | +| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A Harper SDK for Flutter | `flutter pub add harperdb` | +| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | +| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to Harper using the Node-Red platform | `npm i -s node-red-contrib-harperdb` | +| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A Harper SDK for NodeJS | `npm i -s harperive` | +| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A Harper SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` | +| [HarperDB Go
SDK](https://github.com/HarperDB-Add-Ons/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB-Add-Ons/sdk-go` | diff --git a/site/versioned_docs/version-4.4/developers/operations-api/advanced-json-sql-examples.md b/site/versioned_docs/version-4.4/developers/operations-api/advanced-json-sql-examples.md new file mode 100644 index 00000000..61c26f47 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/advanced-json-sql-examples.md @@ -0,0 +1,1780 @@ +--- +title: Advanced JSON SQL Examples +--- + +# Advanced JSON SQL Examples + +## Create movies database +Create a new database called "movies" using the 'create_database' operation. + +_Note: Creating a database is optional; if one is not created, Harper will default to using a database named `data`._ + +### Body +```json +{ + "operation": "create_database", + "database": "movies" +} +``` + +### Response: 200 +```json +{ + "message": "database 'movies' successfully created" +} +``` + +--- + +## Create movie Table +Creates a new table called "movie" inside the database "movies" using the 'create_table' operation. + +### Body + +```json +{ + "operation": "create_table", + "database": "movies", + "table": "movie", + "primary_key": "id" +} +``` + +### Response: 200 +```json +{ + "message": "table 'movies.movie' successfully created." +} +``` + + +--- + +## Create credits Table +Creates a new table called "credits" inside the database "movies" using the 'create_table' operation. + +### Body + +```json +{ + "operation": "create_table", + "database": "movies", + "table": "credits", + "primary_key": "movie_id" +} +``` + +### Response: 200 +```json +{ + "message": "table 'movies.credits' successfully created." +} +``` + + +--- + +## Bulk Insert movie Via CSV +Inserts data from a hosted CSV file into the "movie" table using the 'csv_url_load' operation. + +### Body + +```json +{ + "operation": "csv_url_load", + "database": "movies", + "table": "movie", + "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c" +} +``` + + +--- + +## Bulk Insert credits Via CSV +Inserts data from a hosted CSV file into the "credits" table using the 'csv_url_load' operation. + +### Body + +```json +{ + "operation": "csv_url_load", + "database": "movies", + "table": "credits", + "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c", + "job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c" +} +``` + + +--- + +## View raw data +In the following example we will be running expressions on the keywords & production_companies attributes, so for context we are displaying what the raw data looks like.
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + { + "id": 305, + "name": "moon" + }, + { + "id": 697, + "name": "loss of loved one" + }, + { + "id": 839, + "name": "planet mars" + }, + { + "id": 14626, + "name": "astronaut" + }, + { + "id": 157265, + "name": "moon colony" + }, + { + "id": 162429, + "name": "solar system" + }, + { + "id": 240119, + "name": "father son relationship" + }, + { + "id": 244256, + "name": "near future" + }, + { + "id": 257878, + "name": "planet neptune" + }, + { + "id": 260089, + "name": "space walk" + } + ], + "production_companies": [ + { + "id": 490, + "name": "New Regency Productions", + "origin_country": "" + }, + { + "id": 79963, + "name": "Keep Your Head", + "origin_country": "" + }, + { + "id": 73492, + "name": "MadRiver Pictures", + "origin_country": "" + }, + { + "id": 81, + "name": "Plan B Entertainment", + "origin_country": "US" + }, + { + "id": 30666, + "name": "RT Features", + "origin_country": "BR" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + { + "id": 3070, + "name": "mercenary" + }, + { + "id": 4110, + "name": "mumbai (bombay), india" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 9730, + "name": "crime boss" + }, + { + "id": 11107, + "name": "rescue mission" + }, + { + "id": 18712, + "name": "based on graphic novel" + }, + { + "id": 265216, + "name": "dhaka (dacca), bangladesh" + } + ], + "production_companies": [ + { + "id": 106544, + "name": "AGBO", + "origin_country": "US" + }, + { + "id": 109172, + "name": "Thematic Entertainment", + "origin_country": "US" + }, + { + "id": 92029, + "name": "TGIM Films", + "origin_country": "US" + } + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + { + "id": 10873, + "name": "school" + } + ], + "production_companies": [] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + { + "id": 2651, + "name": "nanotechnology" + }, + { + "id": 9715, + "name": "superhero" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 164218, + "name": "psychotronic" + }, + { + "id": 255024, + "name": "shared universe" + }, + { + "id": 258575, + "name": "valiant comics" + } + ], + "production_companies": [ + { + "id": 34, + "name": "Sony Pictures", + "origin_country": "US" + }, + { + "id": 10246, + "name": "Cross Creek Pictures", + "origin_country": "US" + }, + { + "id": 6573, + "name": "Mimran Schur Pictures", + "origin_country": "US" + }, + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 103673, + "name": "The Hideaway Entertainment", + "origin_country": "US" + }, + { + "id": 124335, + "name": "Valiant Entertainment", + "origin_country": "US" + }, + { + "id": 5, + "name": "Columbia Pictures", + "origin_country": "US" + }, + { + "id": 1225, + "name": "One Race", + "origin_country": "US" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + } + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + { + "id": 818, + "name": "based on novel or book" + }, + { + "id": 4542, + "name": "gold rush" + }, + { + "id": 15162, + "name": "dog" + }, + { + "id": 155821, + "name": "sled dogs" + }, + { + "id": 189390, + "name": "yukon" + }, + { + "id": 207928, + "name": "19th century" + }, + { + "id": 259987, + "name": "cgi animation" + }, + { + "id": 263806, + "name": "1890s" + } + ], + "production_companies": [ + { + "id": 787, + "name": "3 Arts Entertainment", + "origin_country": "US" + }, + { + "id": 127928, + "name": "20th Century Studios", + "origin_country": "US" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + { + "id": 282, + "name": "video game" + }, + { + "id": 6054, + "name": "friendship" + }, + { + "id": 10842, + "name": "good vs evil" + }, + { + "id": 41645, + "name": "based on video game" + }, + { + "id": 167043, + "name": "road movie" + }, + { + "id": 172142, + "name": "farting" + }, + { + "id": 188933, + "name": "bar fight" + }, + { + "id": 226967, + "name": "amistad" + }, + { + "id": 245230, + "name": "live action remake" + }, + { + "id": 258111, + "name": "fantasy" + }, + { + "id": 260223, + "name": "videojuego" + } + ], + "production_companies": [ + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 10644, + "name": "Blur Studios", + "origin_country": "US" + }, + { + "id": 77884, + "name": "Marza Animation Planet", + "origin_country": "JP" + }, + { + "id": 4, + "name": "Paramount", + "origin_country": "US" + }, + { + "id": 113750, + "name": "SEGA", + "origin_country": "JP" + }, + { + "id": 100711, + "name": "DJ2 Entertainment", + "origin_country": "" + }, + { + "id": 24955, + "name": "Paramount Animation", + "origin_country": "US" + } + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + { + "id": 849, + "name": "dc comics" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 187056, + "name": "woman director" + }, + { + "id": 229266, + "name": "dc extended universe" + } + ], + "production_companies": [ + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": 
"US" + }, + { + "id": 82968, + "name": "LuckyChap Entertainment", + "origin_country": "GB" + }, + { + "id": 103462, + "name": "Kroll & Co Entertainment", + "origin_country": "US" + }, + { + "id": 174, + "name": "Warner Bros. Pictures", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + }, + { + "id": 128064, + "name": "DC Films", + "origin_country": "US" + }, + { + "id": 101831, + "name": "Clubhouse Pictures", + "origin_country": "US" + } + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + { + "id": 849, + "name": "dc comics" + } + ], + "production_companies": [ + { + "id": 2785, + "name": "Warner Bros. Animation", + "origin_country": "US" + }, + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + } + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + { + "id": 1353, + "name": "underground" + }, + { + "id": 5318, + "name": "seoul" + }, + { + "id": 5732, + "name": "birthday party" + }, + { + "id": 5752, + "name": "private lessons" + }, + { + "id": 9866, + "name": "basement" + }, + { + "id": 10453, + "name": "con artist" + }, + { + "id": 11935, + "name": "working class" + }, + { + "id": 12565, + "name": "psychological thriller" + }, + { + "id": 13126, + "name": "limousine driver" + }, + { + "id": 14514, + "name": "class differences" + }, + { + "id": 14864, + "name": "rich poor" + }, + { + "id": 17997, + "name": "housekeeper" + }, + { + "id": 18015, + "name": "tutor" + }, + { + "id": 18035, + "name": "family" + }, + { + "id": 33421, + "name": "crime family" + }, + { + "id": 173272, + "name": "flood" + }, + { + "id": 188861, + "name": "smell" + }, + { + "id": 198673, + "name": "unemployed" + }, + { + "id": 237462, + "name": "wealthy family" + } + ], + "production_companies": [ + { + "id": 7036, + "name": "CJ Entertainment", + "origin_country": "KR" + }, + { + "id": 4399, + "name": "Barunson E&A", + "origin_country": "KR" + } + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + { + "id": 161176, + "name": "space opera" + } + ], + "production_companies": [ + { + "id": 1, + "name": "Lucasfilm", + "origin_country": "US" + }, + { + "id": 11461, + "name": "Bad Robot", + "origin_country": "US" + }, + { + "id": 2, + "name": "Walt Disney Pictures", + "origin_country": "US" + }, + { + "id": 120404, + "name": "British Film Commission", + "origin_country": "" + } + ] + } +] +``` + + +--- + +## Simple search_json call +This query uses search_json to convert the keywords object array to a simple string array. The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + "moon", + "loss of loved one", + "planet mars", + "astronaut", + "moon colony", + "solar system", + "father son relationship", + "near future", + "planet neptune", + "space walk" + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + "mercenary", + "mumbai (bombay), india", + "based on comic", + "crime boss", + "rescue mission", + "based on graphic novel", + "dhaka (dacca), bangladesh" + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + "school" + ] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + "nanotechnology", + "superhero", + "based on comic", + "psychotronic", + "shared universe", + "valiant comics" + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + "based on novel or book", + "gold rush", + "dog", + "sled dogs", + "yukon", + "19th century", + "cgi animation", + "1890s" + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + "video game", + "friendship", + "good vs evil", + "based on video game", + "road movie", + "farting", + "bar fight", + "amistad", + "live action remake", + "fantasy", + "videojuego" + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + "dc comics", + "based on comic", + "woman director", + "dc extended universe" + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + "dc comics" + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + "underground", + "seoul", + "birthday party", + "private lessons", + "basement", + "con artist", + "working class", + "psychological thriller", + "limousine driver", + "class differences", + "rich poor", + "housekeeper", + "tutor", + "family", + "crime family", + "flood", + "smell", + "unemployed", + "wealthy family" + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + "space opera" + ] + } +] +``` + + +--- + +## Use search_json in a where clause +This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, release_date FROM movies.movie WHERE search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" +} +``` + +### Response: 200 +```json +[ + { + "title": "Iron Man", + "release_date": "2008-04-30" + }, + { + "title": "The Incredible Hulk", + "release_date": "2008-06-12" + }, + { + "title": "Iron Man 2", + "release_date": "2010-04-28" + }, + { + "title": "Thor", + "release_date": "2011-04-21" + }, + { + "title": "Captain America: The First Avenger", + "release_date": "2011-07-22" + }, + { + "title": "Marvel One-Shot: The Consultant", + "release_date": "2011-09-12" + }, + { + "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", + "release_date": "2011-10-25" + }, + { + "title": "The Avengers", + "release_date": "2012-04-25" + }, + { + "title": "Marvel One-Shot: Item 47", + "release_date": "2012-09-13" + }, + { + "title": "Iron Man 3", + "release_date": "2013-04-18" + }, + { + "title": "Marvel One-Shot: Agent Carter", + "release_date": "2013-09-08" + }, + { + "title": "Thor: The Dark World", + "release_date": "2013-10-29" + }, + { + "title": "Marvel One-Shot: All Hail the King", + "release_date": "2014-02-04" + }, + { + "title": "Marvel Studios: Assembling a Universe", + "release_date": "2014-03-18" + }, + { + "title": "Captain America: The Winter Soldier", + "release_date": "2014-03-20" + }, + { + "title": "Guardians of the Galaxy", + "release_date": "2014-07-30" + }, + { + "title": "Avengers: Age of Ultron", + "release_date": "2015-04-22" + }, + { + "title": "Ant-Man", + "release_date": "2015-07-14" + }, + { + "title": "Captain America: Civil War", + "release_date": "2016-04-27" + }, + { + "title": "Team Thor", + "release_date": "2016-08-28" + }, + { + "title": "Doctor Strange", + "release_date": "2016-10-25" + }, + { + "title": "Guardians of the Galaxy Vol. 2", + "release_date": "2017-04-19" + }, + { + "title": "Spider-Man: Homecoming", + "release_date": "2017-07-05" + }, + { + "title": "Thor: Ragnarok", + "release_date": "2017-10-25" + }, + { + "title": "Black Panther", + "release_date": "2018-02-13" + }, + { + "title": "Avengers: Infinity War", + "release_date": "2018-04-25" + }, + { + "title": "Ant-Man and the Wasp", + "release_date": "2018-07-04" + }, + { + "title": "Captain Marvel", + "release_date": "2019-03-06" + }, + { + "title": "Avengers: Endgame", + "release_date": "2019-04-24" + }, + { + "title": "Spider-Man: Far from Home", + "release_date": "2019-06-28" + }, + { + "title": "Black Widow", + "release_date": "2020-10-28" + }, + { + "title": "Untitled Spider-Man 3", + "release_date": "2021-11-04" + }, + { + "title": "Thor: Love and Thunder", + "release_date": "2022-02-10" + }, + { + "title": "Doctor Strange in the Multiverse of Madness", + "release_date": "2022-03-23" + }, + { + "title": "Untitled Marvel Project (3)", + "release_date": "2022-07-29" + }, + { + "title": "Guardians of the Galaxy Vol. 3", + "release_date": "2023-02-16" + } +] +``` + + +--- + +## Use search_json to show the movies with the largest casts +This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the results. The `cast` attribute holds an object array of details about each movie's cast. We use the expression '$count(id)', which counts each id and returns the total; we alias it in SQL as cast_size, which in turn is used to sort the rows. 
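+ +As a variation, the same '$count(id)' expression can also appear in the WHERE clause (as in the previous example) to keep only unusually large casts — a sketch, not verified against a running instance, with the 200 threshold chosen arbitrarily: + +```json +{ + "operation": "sql", + "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits WHERE search_json('$count(id)', `cast`) >= 200 ORDER BY cast_size DESC" +} +```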
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "movie_title": "Around the World in Eighty Days", + "cast_size": 312 + }, + { + "movie_title": "And the Oscar Goes To...", + "cast_size": 259 + }, + { + "movie_title": "Rock of Ages", + "cast_size": 223 + }, + { + "movie_title": "Mr. Smith Goes to Washington", + "cast_size": 213 + }, + { + "movie_title": "Les Misérables", + "cast_size": 208 + }, + { + "movie_title": "Jason Bourne", + "cast_size": 201 + }, + { + "movie_title": "The Muppets", + "cast_size": 191 + }, + { + "movie_title": "You Don't Mess with the Zohan", + "cast_size": 183 + }, + { + "movie_title": "The Irishman", + "cast_size": 173 + }, + { + "movie_title": "Spider-Man: Far from Home", + "cast_size": 173 + } +] +``` + + +--- + +## search_json as a condition, in a select with a table join +This example shows how we can use SEARCH_JSON to find movies where at least 2 of our favorite actors from Marvel films have acted together, then list the movie, its overview, release date, and the actors' names and their characters. The WHERE clause counts the entries in the credits.cast attribute that match those actors. The SELECT applies the same filter to the cast attribute and transforms each matching object to return just the actor's name and their character. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2" +} +``` + +### Response: 200 +```json +[ + { + "title": "Out of Sight", + "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.", + "release_date": "1998-06-26", + "characters": [ + { + "actor": "Don Cheadle", + "character": "Maurice Miller" + }, + { + "actor": "Samuel L. Jackson", + "character": "Hejira Henry (uncredited)" + } + ] + }, + { + "title": "Iron Man", + "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.", + "release_date": "2008-04-30", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Captain America: The First Avenger", + "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", + "release_date": "2011-07-22", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "In Good Company", + "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", + "release_date": "2004-12-29", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Alex Foreman" + }, + { + "actor": "Clark Gregg", + "character": "Mark Steckle" + } + ] + }, + { + "title": "Zodiac", + "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", + "release_date": "2007-03-02", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Dave Toschi" + }, + { + "actor": "Robert Downey Jr.", + "character": "Paul Avery" + } + ] + }, + { + "title": "Hard Eight", + "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", + "release_date": "1996-02-28", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Clementine" + }, + { + "actor": "Samuel L. Jackson", + "character": "Jimmy" + } + ] + }, + { + "title": "The Spirit", + "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", + "release_date": "2008-12-25", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Silken Floss" + }, + { + "actor": "Samuel L. Jackson", + "character": "Octopuss" + } + ] + }, + { + "title": "S.W.A.T.", + "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", + "release_date": "2003-08-08", + "characters": [ + { + "actor": "Samuel L. Jackson", + "character": "Sgt. 
Dan 'Hondo' Harrelson" + }, + { + "actor": "Jeremy Renner", + "character": "Brian Gamble" + } + ] + }, + { + "title": "Iron Man 2", + "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", + "release_date": "2010-04-28", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Scarlett Johansson", + "character": "Natalie Rushman / Natasha Romanoff / Black Widow" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + } + ] + }, + { + "title": "Thor", + "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", + "release_date": "2011-04-21", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + } + ] + }, + { + "title": "View from the Top", + "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", + "release_date": "2003-03-21", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Donna" + }, + { + "actor": "Mark Ruffalo", + "character": "Ted Stewart" + } + ] + }, + { + "title": "The Nanny Diaries", + "overview": "A college graduate goes to work as a nanny for a rich New York family. Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", + "release_date": "2007-08-24", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Annie Braddock" + }, + { + "actor": "Chris Evans", + "character": "Hayden \"Harvard Hottie\"" + } + ] + }, + { + "title": "The Perfect Score", + "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", + "release_date": "2004-01-30", + "characters": [ + { + "actor": "Chris Evans", + "character": "Kyle" + }, + { + "actor": "Scarlett Johansson", + "character": "Francesca Curtis" + } + ] + }, + { + "title": "The Avengers", + "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. 
Spanning the globe, a daring recruitment effort begins!", + "release_date": "2012-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + } + ] + }, + { + "title": "Iron Man 3", + "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", + "release_date": "2013-04-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / Iron Patriot" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner (uncredited)" + } + ] + }, + { + "title": "Marvel One-Shot: The Consultant", + "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", + "release_date": "2011-09-12", + "characters": [ + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark (archive footage)" + } + ] + }, + { + "title": "Thor: The Dark World", + "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", + "release_date": "2013-10-29", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Loki as Captain America (uncredited)" + } + ] + }, + { + "title": "Avengers: Age of Ultron", + "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. 
As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", + "release_date": "2015-04-22", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + } + ] + }, + { + "title": "Captain America: The Winter Soldier", + "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", + "release_date": "2014-03-20", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + } + ] + }, + { + "title": "Thanks for Sharing", + "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", + "release_date": "2013-09-19", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Adam" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Phoebe" + } + ] + }, + { + "title": "Chef", + "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. 
Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", + "release_date": "2014-05-08", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Molly" + }, + { + "actor": "Robert Downey Jr.", + "character": "Marvin" + } + ] + }, + { + "title": "Marvel Studios: Assembling a Universe", + "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", + "release_date": "2014-03-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Himself / Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Himself / Thor" + }, + { + "actor": "Chris Evans", + "character": "Himself / Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Himself / Bruce Banner / Hulk" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Herself" + }, + { + "actor": "Clark Gregg", + "character": "Himself" + }, + { + "actor": "Samuel L. Jackson", + "character": "Himself" + }, + { + "actor": "Scarlett Johansson", + "character": "Herself" + }, + { + "actor": "Jeremy Renner", + "character": "Himself" + } + ] + }, + { + "title": "Captain America: Civil War", + "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", + "release_date": "2016-04-27", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + } + ] + }, + { + "title": "Thor: Ragnarok", + "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", + "release_date": "2017-10-25", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" + } + ] + }, + { + "title": "Avengers: Endgame", + "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. 
With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", + "release_date": "2019-04-24", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Don Cheadle", + "character": "James Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Pepper Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Avengers: Infinity War", + "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", + "release_date": "2018-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } + ] + }, + { + "title": "Captain Marvel", + "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", + "release_date": "2019-03-06", + "characters": [ + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Agent Phil Coulson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America (uncredited)" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (uncredited)" + }, + { + "actor": "Don Cheadle", + "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk (uncredited)" + } + ] + }, + { + "title": "Spider-Man: Homecoming", + "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", + "release_date": "2017-07-05", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + } + ] + }, + { + "title": "Team Thor", + "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", + "release_date": "2016-08-28", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner" + } + ] + }, + { + "title": "Black Widow", + "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", + "release_date": "2020-10-28", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + } + ] + } +] +``` diff --git a/site/versioned_docs/version-4.4/developers/operations-api/bulk-operations.md b/site/versioned_docs/version-4.4/developers/operations-api/bulk-operations.md new file mode 100644 index 00000000..836087d3 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/bulk-operations.md @@ -0,0 +1,136 @@ +--- +title: Bulk Operations +--- + +# Bulk Operations + +## CSV Data Load +Ingests CSV data, provided directly in the operation as an `insert`, `update` or `upsert` into the specified database table. + +* operation _(required)_ - must always be `csv_data_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. 
The default is `data` +* table _(required)_ - name of the table where you are loading your data +* data _(required)_ - csv data to import into Harper + +### Body +```json +{ + "operation": "csv_data_load", + "database": "dev", + "action": "insert", + "table": "breed", + "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69", + "job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69" +} +``` + +--- + +## CSV File Load +Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table. + +_Note: The CSV file must reside on the same machine on which Harper is running. For example, the path to a CSV on your computer will produce an error if your Harper instance is a cloud instance._ + +* operation _(required)_ - must always be `csv_file_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. The default is `data` +* table _(required)_ - name of the table where you are loading your data +* file_path _(required)_ - path to the csv file on the host running Harper + +### Body +```json +{ + "operation": "csv_file_load", + "action": "insert", + "database": "dev", + "table": "breed", + "file_path": "/home/user/imports/breeds.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e", + "job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e" +} +``` + +--- + +## CSV URL Load +Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into the specified database table. + +* operation _(required)_ - must always be `csv_url_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. The default is `data` +* table _(required)_ - name of the table where you are loading your data +* csv_url _(required)_ - URL to the csv + +### Body +```json +{ + "operation": "csv_url_load", + "action": "insert", + "database": "dev", + "table": "breed", + "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a", + "job_id": "332aa0a2-6833-46cd-88a6-ae375920436a" +} +``` + +--- + +## Import from S3 +This operation allows users to import CSV or JSON files from an AWS S3 bucket as an `insert`, `update` or `upsert`. + +* operation _(required)_ - must always be `import_from_s3` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. 
The default is `data` +* table _(required)_ - name of the table where you are loading your data +* s3 _(required)_ - object containing required AWS S3 bucket info for operation: + * aws_access_key_id - AWS access key for authenticating into your S3 bucket + * aws_secret_access_key - AWS secret for authenticating into your S3 bucket + * bucket - AWS S3 bucket to import from + * key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_ + * region - the region of the bucket + +### Body +```json +{ + "operation": "import_from_s3", + "action": "insert", + "database": "dev", + "table": "dog", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + } +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16", + "job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/developers/operations-api/clustering-nats.md b/site/versioned_docs/version-4.4/developers/operations-api/clustering-nats.md new file mode 100644 index 00000000..a1157bea --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/clustering-nats.md @@ -0,0 +1,457 @@ +--- +title: Clustering using NATS +--- + +# Clustering using NATS + +## Cluster Set Routes +Adds a route/routes to either the hub or leaf server cluster configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_set_routes` +* server _(required)_ - must always be `hub` or `leaf`; in most cases you should use `hub` here +* routes _(required)_ - must always be an array of objects with a host and port: + * host - the host of the remote instance you are clustering to + * port - the clustering port of the remote instance you are clustering to; in most cases this is the value in `clustering.hubServer.cluster.network.port` in the remote instance's `harperdb-config.yaml` + +### Body +```json +{ + "operation": "cluster_set_routes", + "server": "hub", + "routes": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "cluster routes successfully set", + "set": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] +} +``` + +--- + +## Cluster Get Routes +Gets all the hub and leaf server routes from the config file. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_get_routes` + +### Body +```json +{ + "operation": "cluster_get_routes" +} +``` + +### Response: 200 +```json +{ + "hub": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "leaf": [] +} +``` + +--- + +## Cluster Delete Routes +Removes route(s) from the hub and/or leaf server routes array in the config file. 
Returns a deletion success message and arrays of deleted and skipped records. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_delete_routes` +* routes _(required)_ - Must be an array of route object(s) + +### Body + +```json +{ + "operation": "cluster_delete_routes", + "routes": [ + { + "host": "18.116.24.71", + "port": 12345 + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "cluster routes successfully deleted", + "deleted": [ + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] +} +``` + + +--- + +## Add Node +Registers an additional Harper instance with associated subscriptions. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_node` +* node_name _(required)_ - the node name of the remote node +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`: + * schema - the schema to replicate from + * table - the table to replicate from + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + * start_time _(optional)_ - How far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format + +### Body +```json +{ + "operation": "add_node", + "node_name": "ec2-3-22-181-22", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": false, + "publish": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Successfully added 'ec2-3-22-181-22' to manifest" +} +``` + +--- + +## Update Node +Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `update_node` +* node_name _(required)_ - the node name of the remote node you are updating +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`: + * schema - the schema to replicate from + * table - the table to replicate from + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + * start_time _(optional)_ - How far back to go to get transactions from the node being updated. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format + +### Body +```json +{ + "operation": "update_node", + "node_name": "ec2-18-223-239-195", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": false, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Successfully updated 'ec2-18-223-239-195'" +} +``` + +--- + +## Set Node Replication +A more aptly named alias for add and update node. 
This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. The `database` (aka `schema`) parameter is optional; it defaults to `data`. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_node_replication` +* node_name _(required)_ - the node name of the remote node you are updating +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `table`, `subscribe` and `publish`: + * database *(optional)* - the database to replicate from + * table *(required)* - the table to replicate from + * subscribe *(required)* - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish *(required)* - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body +```json +{ + "operation": "set_node_replication", + "node_name": "node1", + "subscriptions": [ + { + "table": "dog", + "subscribe": true, + "publish": true + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Successfully updated 'node1'" +} +``` + +--- + +## Cluster Status +Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_status` + +### Body +```json +{ + "operation": "cluster_status" +} +``` + +### Response: 200 +```json +{ + "node_name": "ec2-18-221-143-69", + "is_enabled": true, + "connections": [ + { + "node_name": "ec2-3-22-181-22", + "status": "open", + "ports": { + "clustering": 12345, + "operations_api": 9925 + }, + "latency_ms": 13, + "uptime": "30d 1h 18m 8s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ] + } + ] +} +``` + + +--- + +## Cluster Network +Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with, and the routes set in its config file. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_network` +* timeout (_optional_) - the amount of time in milliseconds to wait for a response from the network. Must be a number +* connected_nodes (_optional_) - omit `connected_nodes` from the response. Must be a boolean. Defaults to `false` +* routes (_optional_) - omit `routes` from the response. Must be a boolean. Defaults to `false` + +### Body + +```json +{ + "operation": "cluster_network" +} +``` + +### Response: 200 +```json +{ + "nodes": [ + { + "name": "local_node", + "response_time": 4, + "connected_nodes": ["ec2-3-142-255-78"], + "routes": [ + { + "host": "3.142.255.78", + "port": 9932 + } + ] + }, + { + "name": "ec2-3-142-255-78", + "response_time": 57, + "connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"], + "routes": [] + } + ] +} +``` + +--- + +## Remove Node +Removes a Harper instance and associated subscriptions from the cluster. Learn more about [Harper clustering here](../clustering/). 
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `remove_node` +* node_name _(required)_ - The name of the node you are de-registering + +### Body +```json +{ + "operation": "remove_node", + "node_name": "ec2-3-22-181-22" +} +``` + +### Response: 200 +```json +{ + "message": "Successfully removed 'ec2-3-22-181-22' from manifest" +} +``` + +--- + +## Configure Cluster +Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup. +Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `configure_cluster` +* connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node + +### Body +```json +{ + "operation": "configure_cluster", + "connections": [ + { + "node_name": "ec2-3-137-184-8", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": false + } + ] + }, + { + "node_name": "ec2-18-223-239-195", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": true + } + ] + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Cluster successfully configured." +} +``` + +--- + +## Purge Stream + +Purges messages from a stream. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `purge_stream` +* database _(required)_ - the name of the database where the streams table resides +* table _(required)_ - the name of the table that the stream belongs to +* options _(optional)_ - control how many messages get purged. Options are: + * `keep` - purge will keep this many most recent messages + * `seq` - purge all messages up to, but not including, this sequence + +### Body +```json +{ + "operation": "purge_stream", + "database": "dev", + "table": "dog", + "options": { + "keep": 100 + } +} +``` + +--- \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/developers/operations-api/clustering.md b/site/versioned_docs/version-4.4/developers/operations-api/clustering.md new file mode 100644 index 00000000..d9909cd3 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/clustering.md @@ -0,0 +1,348 @@ +--- +title: Clustering +--- + +# Clustering + +The following operations are available for configuring and managing [Harper replication](../replication/). + +_**If you are using NATS for clustering, please see the**_ [_**NATS Clustering Operations**_](./clustering-nats) _**documentation.**_ + +## Add Node + +Adds a new Harper instance to the cluster. If `subscriptions` are provided, it will also create the replication relationships between the nodes. If they are not provided, a fully replicating system will be created. [Learn more about adding nodes here](../replication/). + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `add_node` +* hostname or url _(required)_ - one of these fields is required. You must provide either the `hostname` or the `url` of the node you want to add +* verify\_tls _(optional)_ - a boolean which determines if the TLS certificate should be verified. Setting this to `false` allows the Harper default self-signed certificates to be accepted. Defaults to `true` +* authorization _(optional)_ - an object or a string which contains the authorization information for the node being added. 
If it is an object, it should contain `username` and `password` fields. If it is a string, it should use HTTP `Authorization` style credentials +* retain\_authorization _(optional)_ - a boolean which determines if the authorization credentials should be retained/stored and used every time a connection is made to this node. If `true`, the authorization will be stored on the node record. Generally this should not be used, as mTLS/certificate-based authorization is much more secure and safe, and avoids the need for storing credentials. Defaults to `false`. +* subscriptions _(optional)_ - The relationship created between nodes. If not provided, a fully replicated cluster will be set up. Must be an object array and include `database`, `table`, `subscribe` and `publish`: + * database - the database to replicate + * table - the table to replicate + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body + +```json +{ + "operation": "add_node", + "hostname": "server-two", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password" + } +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully added 'server-two' to cluster" +} +``` + +*** + +## Update Node + +Modifies an existing Harper instance in the cluster. + +_Operation is restricted to super\_user roles only_ + +_Note: will attempt to add the node if it does not exist_ + +* operation _(required)_ - must always be `update_node` +* hostname _(required)_ - the `hostname` of the remote node you are updating +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `database`, `table`, `subscribe` and `publish`: + * database - the database to replicate from + * table - the table to replicate from + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body + +```json +{ + "operation": "update_node", + "hostname": "server-two", + "subscriptions": [ + { + "database": "dev", + "table": "my-table", + "subscribe": true, + "publish": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully updated 'server-two'" +} +``` + +*** + +## Remove Node + +Removes a Harper node from the cluster and stops replication. [Learn more about removing nodes here](../replication/). + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `remove_node` +* hostname _(required)_ - The `hostname` of the node you are removing + +### Body + +```json +{ + "operation": "remove_node", + "hostname": "server-two" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully removed 'server-two' from cluster" +} +``` + +*** + +## Cluster Status + +Returns an array of status objects from a cluster. + +`database_sockets` shows the actual websocket connections that exist between nodes. 
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_status` + +### Body + +```json +{ + "operation": "cluster_status" +} +``` + +### Response: 200 + +```json +{ + "type": "cluster-status", + "connections": [ + { + "url": "wss://server-two:9925", + "subscriptions": [ + { + "schema": "dev", + "table": "my-table", + "publish": true, + "subscribe": true + } + ], + "name": "server-two", + "database_sockets": [ + { + "database": "dev", + "connected": true, + "latency": 0.84197798371315, + "threadId": 1, + "nodes": [ + "server-two" + ] + } + ] + } + ], + "node_name": "server-one", + "is_enabled": true +} +``` + +*** + +## Configure Cluster + +Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `configure_cluster` +* connections _(required)_ - must be an object array with each object following the `add_node` schema. + +### Body + +```json +{ + "operation": "configure_cluster", + "connections": [ + { + "hostname": "server-two", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password2" + }, + "subscriptions": [ + { + "schema": "dev", + "table": "my-table", + "subscribe": true, + "publish": false + } + ] + }, + { + "hostname": "server-three", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password3" + }, + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": true + } + ] + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Cluster successfully configured." +} +``` + +*** + +## Cluster Set Routes + +Adds a route/routes to the `replication.routes` configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_set_routes` +* routes _(required)_ - the routes field is an array that specifies the routes for clustering. Each element in the array can be either a string or an object with `hostname` and `port` properties. + +### Body + +```json +{ + "operation": "cluster_set_routes", + "routes": [ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "cluster routes successfully set", + "set": [ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } + ], + "skipped": [] +} +``` + +*** + +## Cluster Get Routes + +Gets the replication routes from the Harper config file. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_get_routes` + +### Body + +```json +{ + "operation": "cluster_get_routes" +} +``` + +### Response: 200 + +```json +[ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } +] +``` + +*** + +## Cluster Delete Routes + +Removes route(s) from the Harper config file. Returns a deletion success message and arrays of deleted and skipped records. 
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `cluster_delete_routes`
+* routes _(required)_ - must be an array of route object(s)
+
+### Body
+
+```json
+{
+  "operation": "cluster_delete_routes",
+  "routes": [
+    {
+      "hostname": "server-three",
+      "port": 9930
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "cluster routes successfully deleted",
+  "deleted": [
+    {
+      "hostname": "server-three",
+      "port": 9930
+    }
+  ],
+  "skipped": []
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/components.md b/site/versioned_docs/version-4.4/developers/operations-api/components.md
new file mode 100644
index 00000000..442d16bd
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/components.md
@@ -0,0 +1,510 @@
+---
+title: Components
+---
+
+# Components
+
+## Add Component
+
+Creates a new component project in the component root directory using a predefined template.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `add_component`
+* project _(required)_ - the name of the project you wish to create
+* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "add_component",
+  "project": "my-component"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully added project: my-component"
+}
+```
+
+***
+
+## Deploy Component
+
+Deploys a component using either a base64-encoded string representation of a `.tar` file (the output from `package_component`) or a `package` value, which can be any valid NPM reference, such as a GitHub repo, an NPM package, a tarball, a local directory, or a website.
+
+If deploying with the `payload` option, Harper will decode the base64-encoded string, reconstitute the .tar file of your project folder, and extract it to the component root project directory.
+
+If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`. Then `npm install` will be used to install the component in the `node_modules` directory located in the hdb root. The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM registered packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers. Here is an example URL package reference to a (public) Git repository that doesn't require authentication:
+
+```
+https://github.com/HarperDB/application-template#semver:v1.0.0
+```
+
+or this can be shortened to:
+
+```
+HarperDB/application-template#semver:v1.0.0
+```
+
+You can also install from a private repository if you have SSH keys installed on the server:
+
+```
+git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0
+```
+
+Or you can use a GitHub token:
+
+```
+https://@github.com/my-org/my-app#semver:v1.0.0
+```
+
+Or you can use a GitLab Project Access Token:
+
+```
+https://my-project:@gitlab.com/my-group/my-project#semver:v1.0.0
+```
+
+Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node\_modules in the hdb root directory (typically `~/hdb/node_modules`).
+
+_Note: After deploying a component, a restart may be required_
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `deploy_component`
+* project _(required)_ - the name of the project you wish to deploy
+* package _(optional)_ - this can be any valid GitHub or NPM reference
+* payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string
+* restart _(optional)_ - must be either a boolean or the string `rolling`. If set to `rolling`, a rolling restart will be triggered after the component is deployed, meaning that each node in the cluster will be sequentially restarted (waiting for the last restart to start the next). If set to `true`, the restart will not be rolling; all nodes will be restarted in parallel. If `replicated` is `true`, the restart operations will be replicated across the cluster.
+* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean.
+* install\_command _(optional)_ - a command to use when installing the component. Must be a string. This can be used to install dependencies with pnpm or yarn, for example: `"install_command": "npm install -g pnpm && pnpm install"`
+
+### Body
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "package": "HarperDB/application-template",
+  "replicated": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deployed: my-component"
+}
+```
+
+***
+
+## Package Component
+
+Creates a temporary `.tar` file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name and the payload (the base64-encoded string).
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `package_component`
+* project _(required)_ - the name of the project you wish to package
+* skip\_node\_modules _(optional)_ - if true, the project's node\_modules directory will be excluded from the tar file. Must be a boolean
+
+### Body
+
+```json
+{
+  "operation": "package_component",
+  "project": "my-component",
+  "skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "project": "my-component",
+  "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
+}
+```
+
+***
+
+## Drop Component
+
+Deletes a file from inside the component project or deletes the complete project.
+
+**If just `project` is provided, all of that project's local files and folders will be deleted.**
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `drop_component`
+* project _(required)_ - the name of the project you wish to delete, or to delete from if using the `file` parameter
+* file _(optional)_ - the path, relative to your project folder, of the file you wish to delete
+* replicated _(optional)_ - if true, Harper will replicate the component deletion to all nodes in the cluster. Must be a boolean.
+* restart _(optional)_ - if true, Harper will restart after dropping the component. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "drop_component",
+  "project": "my-component",
+  "file": "utils/myUtils.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully dropped: my-component/utils/myUtils.js"
+}
+```
+
+***
+
+## Get Components
+
+Gets all local component files and folders and any component config from `harperdb-config.yaml`.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `get_components`
+
+### Body
+
+```json
+{
+  "operation": "get_components"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "name": "components",
+  "entries": [
+    {
+      "package": "HarperDB/application-template",
+      "name": "deploy-test-gh"
+    },
+    {
+      "package": "@fastify/compress",
+      "name": "fast-compress"
+    },
+    {
+      "name": "my-component",
+      "entries": [
+        {
+          "name": "LICENSE",
+          "mtime": "2023-08-22T16:00:40.286Z",
+          "size": 1070
+        },
+        {
+          "name": "index.md",
+          "mtime": "2023-08-22T16:00:40.287Z",
+          "size": 1207
+        },
+        {
+          "name": "config.yaml",
+          "mtime": "2023-08-22T16:00:40.287Z",
+          "size": 1069
+        },
+        {
+          "name": "package.json",
+          "mtime": "2023-08-22T16:00:40.288Z",
+          "size": 145
+        },
+        {
+          "name": "resources.js",
+          "mtime": "2023-08-22T16:00:40.289Z",
+          "size": 583
+        },
+        {
+          "name": "schema.graphql",
+          "mtime": "2023-08-22T16:00:40.289Z",
+          "size": 466
+        },
+        {
+          "name": "utils",
+          "entries": [
+            {
+              "name": "commonUtils.js",
+              "mtime": "2023-08-22T16:00:40.289Z",
+              "size": 583
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+```
+
+***
+
+## Get Component File
+
+Gets the contents of a file inside a component project.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `get_component_file`
+* project _(required)_ - the name of the project where the file is located
+* file _(required)_ - the path, relative to your project folder, of the file you wish to view
+* encoding _(optional)_ - the encoding that will be passed to the read file call. Defaults to `utf8`
+
+### Body
+
+```json
+{
+  "operation": "get_component_file",
+  "project": "my-component",
+  "file": "resources.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}"
+}
+```
+
+***
+
+## Set Component File
+
+Creates or updates a file inside a component project.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `set_component_file`
+* project _(required)_ - the name of the project the file is located in
+* file _(required)_ - the path, relative to your project folder, of the file you wish to set
+* payload _(required)_ - what will be written to the file
+* encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8`
+* replicated _(optional)_ - if true, Harper will replicate the component update to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "set_component_file",
+  "project": "my-component",
+  "file": "test.js",
+  "payload": "console.log('hello world')"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully set component: test.js"
+}
+```
+
+## Add SSH Key
+
+Adds an SSH key for deploying components from private repositories. This will also create an SSH config file that will be used when deploying the components.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `add_ssh_key`
+* name _(required)_ - the name of the key
+* key _(required)_ - the private key contents. Line breaks must be delimited with `\n`
+* host _(required)_ - the host for the ssh config (see below). Used as part of the `package` URL when deploying a component using this key
+* hostname _(required)_ - the hostname for the ssh config (see below). Used to map `host` to an actual domain (e.g. `github.com`)
+* known\_hosts _(optional)_ - the public SSH keys of the host your component will be retrieved from. If `hostname` is `github.com` this will be retrieved automatically. Line breaks must be delimited with `\n`
+* replicated _(optional)_ - if true, Harper will replicate the key to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "add_ssh_key",
+  "name": "harperdb-private-component",
+  "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nfake\nkey\n-----END OPENSSH PRIVATE KEY-----",
+  "host": "harperdb-private-component.github.com",
+  "hostname": "github.com"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Added ssh key: harperdb-private-component"
+}
+```
+
+### Generated Config and Deploy Component "package" string examples
+
+```
+#harperdb-private-component
+Host harperdb-private-component.github.com
+  HostName github.com
+  User git
+  IdentityFile /hdbroot/ssh/harperdb-private-component.key
+  IdentitiesOnly yes
+```
+
+```
+"package": "git+ssh://git@<host>:<owner>/<repo>.git#semver:v1.2.3"
+
+"package": "git+ssh://git@harperdb-private-component.github.com:HarperDB/harperdb-private-component.git#semver:v1.2.3"
+```
+
+Note that `deploy_component` with a package uses `npm install`, so the URL must be a valid npm format URL. The above is an example of a URL using a tag in the repo to install.
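+
+Putting this together, a sketch of a `deploy_component` request that installs through the SSH host alias defined above (the project name and version tag are illustrative):
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "harperdb-private-component",
+  "package": "git+ssh://git@harperdb-private-component.github.com:HarperDB/harperdb-private-component.git#semver:v1.2.3"
+}
+```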
+
+## Update SSH Key
+
+Updates the private key contents of an existing SSH key.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `update_ssh_key`
+* name _(required)_ - the name of the key to be updated
+* key _(required)_ - the private key contents. Line breaks must be delimited with `\n`
+* replicated _(optional)_ - if true, Harper will replicate the key update to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "update_ssh_key",
+  "name": "harperdb-private-component",
+  "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nNEWFAKE\nkey\n-----END OPENSSH PRIVATE KEY-----",
+  "host": "harperdb-private-component.github.com",
+  "hostname": "github.com"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Updated ssh key: harperdb-private-component"
+}
+```
+
+## Delete SSH Key
+
+Deletes an SSH key. This will also remove it from the generated SSH config.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `delete_ssh_key`
+* name _(required)_ - the name of the key to be deleted
+* replicated _(optional)_ - if true, Harper will replicate the key deletion to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "delete_ssh_key",
+  "name": "harperdb-private-component"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Deleted ssh key: harperdb-private-component"
+}
+```
+
+## List SSH Keys
+
+Lists the names of the added SSH keys.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `list_ssh_keys`
+
+### Body
+
+```json
+{
+  "operation": "list_ssh_keys"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "name": "harperdb-private-component"
+  },
+  ...
+]
+```
+
+## Set SSH Known Hosts
+
+Sets the SSH known\_hosts file. This will overwrite the file.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `set_ssh_known_hosts`
+* known\_hosts _(required)_ - the contents to set the known\_hosts file to. Line breaks must be delimited with `\n`
+* replicated _(optional)_ - if true, Harper will replicate the known hosts to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "set_ssh_known_hosts",
+  "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Known hosts successfully set"
+}
+```
+
+## Get SSH Known Hosts
+
+Gets the contents of the known\_hosts file.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `get_ssh_known_hosts`
+
+### Body
+
+```json
+{
+  "operation": "get_ssh_known_hosts"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/custom-functions.md b/site/versioned_docs/version-4.4/developers/operations-api/custom-functions.md
new file mode 100644
index 00000000..308544f6
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/custom-functions.md
@@ -0,0 +1,276 @@
+---
+title: Custom Functions
+---
+
+# Custom Functions
+
+## Custom Functions Status
+
+Returns the state of the Custom Functions server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `custom_functions_status`
+
+### Body
+```json
+{
+  "operation": "custom_functions_status"
+}
+```
+
+### Response: 200
+```json
+{
+  "is_enabled": true,
+  "port": 9926,
+  "directory": "/Users/myuser/hdb/custom_functions"
+}
+```
+
+---
+
+## Get Custom Functions
+
+Returns the projects within the Custom Functions root project directory. Each project has details including each of the files in the routes and helpers directories, and the total file count in the static folder.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_custom_functions`
+
+### Body
+
+```json
+{
+  "operation": "get_custom_functions"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "dogs": {
+    "routes": ["examples"],
+    "helpers": ["example"],
+    "static": 3
+  }
+}
+```
+
+---
+
+## Get Custom Function
+
+Returns the content of the specified file as text. Harper Studio uses this call to render the file content in its built-in code editor.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_custom_function`
+* project _(required)_ - the name of the project containing the file for which you wish to get content
+* type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers
+* file _(required)_ - the name of the file for which you wish to get content - should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+  "operation": "get_custom_function",
+  "project": "dogs",
+  "type": "helpers",
+  "file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+---
+
+## Set Custom Function
+
+Updates the content of the specified file. Harper Studio uses this call to save any changes made through its built-in code editor.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_custom_function`
+* project _(required)_ - the name of the project containing the file for which you wish to set content
+* type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers
+* file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js)
+* function_content _(required)_ - the content you wish to save into the specified file
+
+### Body
+
+```json
+{
+  "operation": "set_custom_function",
+  "project": "dogs",
+  "type": "helpers",
+  "file": "example",
+  "function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully updated custom function: example.js"
+}
+```
+
+---
+
+## Drop Custom Function
+
+Deletes the specified file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function`
+* project _(required)_ - the name of the project containing the file you wish to delete
+* type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers
+* file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+  "operation": "drop_custom_function",
+  "project": "dogs",
+  "type": "helpers",
+  "file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deleted custom function: example.js"
+}
+```
+
+---
+
+## Add Custom Function Project
+
+Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is publicly available here: https://github.com/HarperDB/harperdb-custom-functions-template.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_custom_function_project`
+* project _(required)_ - the name of the project you wish to create
+
+### Body
+
+```json
+{
+  "operation": "add_custom_function_project",
+  "project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully created custom function project: dogs"
+}
+```
+
+---
+
+## Drop Custom Function Project
+
+Deletes the specified project folder and all of its contents.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function_project`
+* project _(required)_ - the name of the project you wish to delete
+
+### Body
+
+```json
+{
+  "operation": "drop_custom_function_project",
+  "project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deleted project: dogs"
+}
+```
+
+---
+
+## Package Custom Function Project
+
+Creates a `.tar` file of the specified project folder, reads it into a base64-encoded string, and returns an object containing the project name, the payload (the base64-encoded string), and the path to the temporary file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `package_custom_function_project`
+* project _(required)_ - the name of the project you wish to package up for deployment
+* skip_node_modules _(optional)_ - if true, the project's node_modules directory will be excluded from the tar file. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "package_custom_function_project",
+  "project": "dogs",
+  "skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "project": "dogs",
+  "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
+  "file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar"
+}
+```
+
+---
+
+## Deploy Custom Function Project
+
+Takes the output of package_custom_function_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the Custom Functions root project directory.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `deploy_custom_function_project`
+* project _(required)_ - the name of the project you wish to deploy. Must be a string
+* payload _(required)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+### Body
+
+```json
+{
+  "operation": "deploy_custom_function_project",
+  "project": "dogs",
+  "payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deployed project: dogs"
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/databases-and-tables.md b/site/versioned_docs/version-4.4/developers/operations-api/databases-and-tables.md
new file mode 100644
index 00000000..27ec954e
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/databases-and-tables.md
@@ -0,0 +1,364 @@
+---
+title: Databases and Tables
+---
+
+# Databases and Tables
+
+## Describe All
+Returns the definitions of all databases and tables within the instance. Record counts above 5000 records are estimated, as determining the exact count can be expensive. When the record count is estimated, this is indicated by the inclusion of a confidence interval of `estimated_record_range`. If you need the exact count, you can include `"exact_count": true` in the operation, but be aware that this requires a full table scan (which may be expensive).
+
+* operation _(required)_ - must always be `describe_all`
+
+### Body
+```json
+{
+  "operation": "describe_all"
+}
+```
+
+### Response: 200
+```json
+{
+  "dev": {
+    "dog": {
+      "schema": "dev",
+      "name": "dog",
+      "hash_attribute": "id",
+      "audit": true,
+      "schema_defined": false,
+      "attributes": [
+        {
+          "attribute": "id",
+          "indexed": true,
+          "is_primary_key": true
+        },
+        {
+          "attribute": "__createdtime__",
+          "indexed": true
+        },
+        {
+          "attribute": "__updatedtime__",
+          "indexed": true
+        },
+        {
+          "attribute": "type",
+          "indexed": true
+        }
+      ],
+      "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+      "record_count": 4000,
+      "estimated_record_range": [3976, 4033],
+      "last_updated_record": 1697658683698.4504
+    }
+  }
+}
+```
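+
+If the exact record count is needed, the `exact_count` flag described above can be added to the request, at the cost of a full table scan:
+
+```json
+{
+  "operation": "describe_all",
+  "exact_count": true
+}
+```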
+
+---
+
+## Describe Database
+Returns the definitions of all tables within the specified database.
+
+* operation _(required)_ - must always be `describe_database`
+* database _(optional)_ - database where the table you wish to describe lives. The default is `data`
+
+### Body
+```json
+{
+  "operation": "describe_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+  "dog": {
+    "schema": "dev",
+    "name": "dog",
+    "hash_attribute": "id",
+    "audit": true,
+    "schema_defined": false,
+    "attributes": [
+      {
+        "attribute": "id",
+        "indexed": true,
+        "is_primary_key": true
+      },
+      {
+        "attribute": "__createdtime__",
+        "indexed": true
+      },
+      {
+        "attribute": "__updatedtime__",
+        "indexed": true
+      },
+      {
+        "attribute": "type",
+        "indexed": true
+      }
+    ],
+    "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+    "record_count": 4000,
+    "estimated_record_range": [3976, 4033],
+    "last_updated_record": 1697658683698.4504
+  }
+}
+```
+
+---
+
+## Describe Table
+Returns the definition of the specified table.
+
+* operation _(required)_ - must always be `describe_table`
+* table _(required)_ - table you wish to describe
+* database _(optional)_ - database where the table you wish to describe lives. The default is `data`
+
+### Body
+```json
+{
+  "operation": "describe_table",
+  "table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+  "schema": "dev",
+  "name": "dog",
+  "hash_attribute": "id",
+  "audit": true,
+  "schema_defined": false,
+  "attributes": [
+    {
+      "attribute": "id",
+      "indexed": true,
+      "is_primary_key": true
+    },
+    {
+      "attribute": "__createdtime__",
+      "indexed": true
+    },
+    {
+      "attribute": "__updatedtime__",
+      "indexed": true
+    },
+    {
+      "attribute": "type",
+      "indexed": true
+    }
+  ],
+  "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+  "record_count": 4000,
+  "estimated_record_range": [3976, 4033],
+  "last_updated_record": 1697658683698.4504
+}
+```
+
+---
+
+## Create Database
+Create a new database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `create_database`
+* database _(optional)_ - name of the database you are creating. The default is `data`
+
+### Body
+```json
+{
+  "operation": "create_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "database 'dev' successfully created"
+}
+```
+
+---
+
+## Drop Database
+Drop an existing database. NOTE: Dropping a database will delete all tables and all of their records in that database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_database`
+* database _(required)_ - name of the database you are dropping
+* replicated _(optional)_ - if true, Harper will replicate the database removal to all nodes in the cluster. Must be a boolean.
+
+### Body
+```json
+{
+  "operation": "drop_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "successfully deleted 'dev'"
+}
+```
+
+---
+
+## Create Table
+Create a new table within a database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `create_table`
+* database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided it will default to `data`.
+* table _(required)_ - name of the table you are creating
+* primary_key _(required)_ - primary key for the table
+* attributes _(optional)_ - an array of attributes that specifies the schema for the table, that is, the set of attributes for the table. When attributes are supplied, the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted (see the example following the response below). Each attribute is specified as:
+  * name _(required)_ - the name of the attribute
+  * indexed _(optional)_ - indicates if the attribute should be indexed
+  * type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any)
+* expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted (records are not evicted on any timer if not specified). This is specified in seconds.
+
+### Body
+```json
+{
+  "operation": "create_table",
+  "database": "dev",
+  "table": "dog",
+  "primary_key": "id"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "table 'dev.dog' successfully created."
+}
+```
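+
+A table with a defined schema can be created by supplying the optional `attributes` array described above; the attribute names and types below are illustrative:
+
+```json
+{
+  "operation": "create_table",
+  "database": "dev",
+  "table": "dog",
+  "primary_key": "id",
+  "attributes": [
+    { "name": "dog_name", "type": "String", "indexed": true },
+    { "name": "age", "type": "Int" }
+  ]
+}
+```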
+
+---
+
+## Drop Table
+Drop an existing database table. NOTE: Dropping a table will delete all associated records in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_table`
+* database _(optional)_ - database where the table you are dropping lives. The default is `data`
+* table _(required)_ - name of the table you are dropping
+* replicated _(optional)_ - if true, Harper will replicate the table removal to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "drop_table",
+  "database": "dev",
+  "table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "successfully deleted table 'dev.dog'"
+}
+```
+
+---
+
+## Create Attribute
+Create a new attribute within the specified table. **The create_attribute operation can be used by admins wishing to pre-define database values for setting role-based permissions or for any other reason.**
+
+_Note: Harper will automatically create new attributes on insert and update if they do not already exist within the database._
+
+* operation _(required)_ - must always be `create_attribute`
+* database _(optional)_ - name of the database of the table to which you want to add your attribute. The default is `data`
+* table _(required)_ - name of the table to which you want to add your attribute
+* attribute _(required)_ - name for the attribute
+
+### Body
+```json
+{
+  "operation": "create_attribute",
+  "database": "dev",
+  "table": "dog",
+  "attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "inserted 1 of 1 records",
+  "skipped_hashes": [],
+  "inserted_hashes": [
+    "383c0bef-5781-4e1c-b5c8-987459ad0831"
+  ]
+}
+```
+
+---
+
+## Drop Attribute
+Drop an existing attribute from the specified table. NOTE: Dropping an attribute will delete all associated attribute values in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_attribute`
+* database _(optional)_ - database where the table you are dropping lives. The default is `data`
+* table _(required)_ - table where the attribute you are dropping lives
+* attribute _(required)_ - attribute that you intend to drop
+
+### Body
+
+```json
+{
+  "operation": "drop_attribute",
+  "database": "dev",
+  "table": "dog",
+  "attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "successfully deleted attribute 'is_adorable'"
+}
+```
+
+---
+
+## Get Backup
+This will return a snapshot of the requested database. This provides a means for backing up the database through the operations API. The response will be the raw database file (in binary format), which can later be restored as a database file by copying it into the appropriate hdb/databases directory (with Harper not running). The returned file is a snapshot of the database at the moment in time that the get_backup operation begins. This also supports backing up individual tables in a database. However, this is a more expensive operation than backing up a database in whole, and it will lose any transactional atomicity between writes across tables, so generally it is recommended that you back up the entire database.
+
+It is important to note that trying to copy a database file that is in use (Harper actively running and writing to the file) using standard file copying tools is not safe (the copied file will likely be corrupt), which is why using this snapshot operation is recommended for backups (volume snapshots are also a good way to back up Harper databases).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `get_backup`
+* database _(required)_ - this is the database that will be snapshotted and returned
+* table _(optional)_ - this will specify a specific table to back up
+* tables _(optional)_ - this will specify a specific set of tables to back up
+
+### Body
+
+```json
+{
+  "operation": "get_backup",
+  "database": "dev"
+}
+```
+
+### Response: 200
+```
+The database in raw binary data format
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/index.md b/site/versioned_docs/version-4.4/developers/operations-api/index.md
new file mode 100644
index 00000000..6d4c2517
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/index.md
@@ -0,0 +1,52 @@
+---
+title: Operations API
+---
+
+# Operations API
+
+The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling Harper. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../deployments/configuration#operationsapi), on the root path, where the body is the operations object. These requests need to be authenticated, which can be done with [basic auth](../security/basic-auth) or [JWT authentication](../security/jwt-auth). For example, a request to create a table would be performed as:
+
+```http
+POST http://my-harperdb-server:9925/
+Authorization: Basic YourBase64EncodedInstanceUser:Pass
+Content-Type: application/json
+
+{
+  "operation": "create_table",
+  "table": "my-table"
+}
+```
+
+The operations API reference is available below, categorized by topic:
+
+* [Quick Start Examples](./quickstart-examples)
+* [Databases and Tables](./databases-and-tables)
+* [NoSQL Operations](./nosql-operations)
+* [Bulk Operations](./bulk-operations)
+* [Users and Roles](./users-and-roles)
+* [Clustering](./clustering)
+* [Clustering with NATS](./clustering-nats)
+* [Components](./components)
+* [Registration](./registration)
+* [Jobs](./jobs)
+* [Logs](./logs)
+* [Utilities](./utilities)
+* [Token Authentication](./token-authentication)
+* [SQL Operations](./sql-operations)
+* [Advanced JSON SQL Examples](./advanced-json-sql-examples)
+* [Past Release API Documentation](https://olddocs.harperdb.io)
+
+## More Examples
+
+Here is an example of using `curl` to make an operations API request:
+
+```bash
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+"operation": "create_schema",
+"schema": "dev"
+}'
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/jobs.md b/site/versioned_docs/version-4.4/developers/operations-api/jobs.md
new file mode 100644
index 00000000..8b05357f
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/jobs.md
@@ -0,0 +1,82 @@
+---
+title: Jobs
+---
+
+# Jobs
+
+## Get Job
+Returns job status, metrics, and messages for the specified job ID.
+
+* operation _(required)_ - must always be `get_job`
+* id _(required)_ - the id of the job you wish to view
+
+### Body
+
+```json
+{
+  "operation": "get_job",
+  "id": "4a982782-929a-4507-8794-26dae1132def"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "__createdtime__": 1611615798782,
+    "__updatedtime__": 1611615801207,
+    "created_datetime": 1611615798774,
+    "end_datetime": 1611615801206,
+    "id": "4a982782-929a-4507-8794-26dae1132def",
+    "job_body": null,
+    "message": "successfully loaded 350 of 350 records",
+    "start_datetime": 1611615798805,
+    "status": "COMPLETE",
+    "type": "csv_url_load",
+    "user": "HDB_ADMIN",
+    "start_datetime_converted": "2021-01-25T23:03:18.805Z",
+    "end_datetime_converted": "2021-01-25T23:03:21.206Z"
+  }
+]
+```
+
+---
+
+## Search Jobs By Start Date
+Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `search_jobs_by_start_date`
+* from_date _(required)_ - the date you wish to start the search
+* to_date _(required)_ - the date you wish to end the search
+
+### Body
+```json
+{
+  "operation": "search_jobs_by_start_date",
+  "from_date": "2021-01-25T22:05:27.464+0000",
+  "to_date": "2021-01-25T23:05:27.464+0000"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1",
+    "user": "HDB_ADMIN",
+    "type": "csv_url_load",
+    "status": "COMPLETE",
+    "start_datetime": 1611613284781,
+    "end_datetime": 1611613287204,
+    "job_body": null,
+    "message": "successfully loaded 350 of 350 records",
+    "created_datetime": 1611613284764,
+    "__createdtime__": 1611613284767,
+    "__updatedtime__": 1611613287207,
+    "start_datetime_converted": "2021-01-25T22:21:24.781Z",
+    "end_datetime_converted": "2021-01-25T22:21:27.204Z"
+  }
+]
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/logs.md b/site/versioned_docs/version-4.4/developers/operations-api/logs.md
new file mode 100644
index 00000000..b2b0e2b6
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/logs.md
@@ -0,0 +1,768 @@
+---
+title: Logs
+---
+
+# Logs
+
+## Read Harper Log
+
+Returns log outputs from the primary Harper log based on the provided search criteria. [Read more about Harper logging here](../../administration/logging/standard-logging#read-logs-via-the-api).
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `read_log`
+* start _(optional)_ - result to start with. Default is 0, the first log in `hdb.log`. Must be a number
+* limit _(optional)_ - number of results returned. Default is 1000. Must be a number
+* level _(optional)_ - error level to filter on. Default behavior is all levels. Must be `notify`, `error`, `warn`, `info`, `debug` or `trace`
+* from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is the first log in `hdb.log`
+* until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is the last log in `hdb.log`
+* order _(optional)_ - order in which to display logs, `desc` or `asc`, by timestamp. By default, `hdb.log` order is maintained
+
+### Body
+
+```json
+{
+  "operation": "read_log",
+  "start": 0,
+  "limit": 1000,
+  "level": "error",
+  "from": "2021-01-25T22:05:27.464+0000",
+  "until": "2021-01-25T23:05:27.464+0000",
+  "order": "desc"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "level": "notify",
+    "message": "Connected to cluster server.",
+    "timestamp": "2021-01-25T23:03:20.710Z",
+    "thread": "main/0",
+    "tags": []
+  },
+  {
+    "level": "warn",
+    "message": "Login failed",
+    "timestamp": "2021-01-25T22:24:45.113Z",
+    "thread": "http/9",
+    "tags": []
+  },
+  {
+    "level": "error",
+    "message": "unknown attribute 'name and breed'",
+    "timestamp": "2021-01-25T22:23:24.167Z",
+    "thread": "http/9",
+    "tags": []
+  }
+]
+```
+
+***
+
+## Read Transaction Log
+
+Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. [Read more about Harper transaction logs here](./logs#read-transaction-log).
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_transaction_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* from _(optional)_ - time format must be millisecond-based epoch in UTC +* to _(optional)_ - time format must be millisecond-based epoch in UTC +* limit _(optional)_ - max number of logs you want to receive. Must be a number + +### Body + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1560249020865, + "to": 1660585656639, + "limit": 10 +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619813, + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + 
"__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +*** + +## Delete Transaction Logs Before + +Deletes transaction log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `delete_transaction_log_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126" +} +``` + +*** + +## Read Audit Log + +AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search\_type and search\_values fields. [Read more about Harper transaction logs here.](../../administration/logging/transaction-logging#read_transaction_log) + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username` +* search\_values _(optional)_ - an array of string or numbers relating to search\_type + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + } +] +``` + +*** + +## Read Audit Log by timestamp + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table between the specified time window. 
[Read more about Harper transaction logs here](./logs#read-transaction-log). + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - timestamp +* search\_values _(optional)_ - an array containing a maximum of two values \[`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. + * Timestamp format is millisecond-based epoch in UTC + * If no items are supplied then all transactions are returned + * If only one entry is supplied then all transactions after the supplied timestamp will be returned + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558, + 1660585759710.56 + ] +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } +] +``` + +*** + +## Read Audit Log by username + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - username +* search\_values _(optional)_ - the Harper user for whom you would like to view transactions + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +### Response: 200 + +```json +{ + "admin": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +*** + +## Read Audit Log by hash\_value + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - hash\_value +* search\_values _(optional)_ - an array of hash\_attributes for which you wish to see transaction logs + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +### Response: 200 + +```json +{ + "318": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +*** + +## Delete Audit Logs Before + +AuditLog must be enabled in the Harper configuration file to make this request. Deletes audit log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `delete_audit_logs_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1660585759710.56 +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" +} +``` diff --git a/site/versioned_docs/version-4.4/developers/operations-api/nosql-operations.md b/site/versioned_docs/version-4.4/developers/operations-api/nosql-operations.md new file mode 100644 index 00000000..f52468ec --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/nosql-operations.md @@ -0,0 +1,413 @@ +--- +title: NoSQL Operations +--- + +# NoSQL Operations + +## Insert + +Adds one or more rows of data to a database table. Primary keys of the inserted JSON record may be supplied on insert. If a primary key is not provided, then a GUID or incremented number (depending on type) will be generated for each record. + +* operation _(required)_ - must always be `insert` +* database _(optional)_ - database where the table you are inserting records into lives. 
The default is `data`
+* table _(required)_ - table where you want to insert records
+* records _(required)_ - array of one or more records to insert
+
+### Body
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 8,
+			"dog_name": "Harper",
+			"breed_id": 346,
+			"age": 7
+		},
+		{
+			"id": 9,
+			"dog_name": "Penny",
+			"breed_id": 154,
+			"age": 7
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "inserted 2 of 2 records",
+	"inserted_hashes": [
+		8,
+		9
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Update
+
+Changes the values of specified attributes in one or more rows in a database table, as identified by the primary key. NOTE: the primary key of the updated JSON record(s) MUST be supplied on update.
+
+* operation _(required)_ - must always be `update`
+* database _(optional)_ - database of the table you are updating records in. The default is `data`
+* table _(required)_ - table where you want to update records
+* records _(required)_ - array of one or more records to update
+
+### Body
+
+```json
+{
+	"operation": "update",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 1,
+			"weight_lbs": 55
+		},
+		{
+			"id": 2,
+			"owner": "Kyle B",
+			"weight_lbs": 35
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "updated 2 of 2 records",
+	"update_hashes": [
+		1,
+		2
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Upsert
+
+Changes the values of specified attributes for rows with matching primary keys that exist in the table. Adds rows to the database table for primary keys that do not exist or are not provided.
+
+* operation _(required)_ - must always be `upsert`
+* database _(optional)_ - database of the table you are upserting records into. The default is `data`
+* table _(required)_ - table where you want to upsert records
+* records _(required)_ - array of one or more records to upsert
+
+### Body
+
+```json
+{
+	"operation": "upsert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 8,
+			"weight_lbs": 155
+		},
+		{
+			"name": "Bill",
+			"breed": "Pit Bull",
+			"id": 10,
+			"Age": 11,
+			"weight_lbs": 155
+		},
+		{
+			"name": "Harper",
+			"breed": "Mutt",
+			"age": 5,
+			"weight_lbs": 155
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "upserted 3 of 3 records",
+	"upserted_hashes": [
+		8,
+		10,
+		"ea06fc8e-717b-4c6c-b69d-b29014054ab7"
+	]
+}
+```
+
+---
+
+## Delete
+
+Removes one or more rows of data from a specified table.
+
+* operation _(required)_ - must always be `delete`
+* database _(optional)_ - database where the table you are deleting records from lives. The default is `data`
+* table _(required)_ - table from which you want to delete records
+* ids _(required)_ - array of one or more primary key values, which identifies the records to delete
+
+### Body
+
+```json
+{
+	"operation": "delete",
+	"database": "dev",
+	"table": "dog",
+	"ids": [
+		1,
+		2
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "2 of 2 records successfully deleted",
+	"deleted_hashes": [
+		1,
+		2
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Search By ID
+
+Returns data from a table for one or more primary keys.
+
+* operation _(required)_ - must always be `search_by_id`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* ids _(required)_ - array of primary keys to retrieve
+* get_attributes _(required)_ - define which attributes you want returned.
_Use `['*']` to return all attributes_
+
+### Body
+
+```json
+{
+	"operation": "search_by_id",
+	"database": "dev",
+	"table": "dog",
+	"ids": [
+		1,
+		2
+	],
+	"get_attributes": [
+		"dog_name",
+		"breed_id"
+	]
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"dog_name": "Penny",
+		"breed_id": 154
+	},
+	{
+		"dog_name": "Harper",
+		"breed_id": 346
+	}
+]
+```
+
+---
+
+## Search By Value
+
+Returns data from a table for a matching value.
+
+* operation _(required)_ - must always be `search_by_value`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* search_attribute _(required)_ - attribute you wish to search; can be any attribute
+* search_value _(required)_ - value you wish to search for; wildcards are allowed
+* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+
+### Body
+
+```json
+{
+	"operation": "search_by_value",
+	"database": "dev",
+	"table": "dog",
+	"search_attribute": "owner_name",
+	"search_value": "Ky*",
+	"get_attributes": [
+		"id",
+		"dog_name"
+	]
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"id": 1,
+		"dog_name": "Penny"
+	},
+	{
+		"id": 6,
+		"dog_name": "Kato"
+	}
+]
+```
+
+---
+
+## Search By Conditions
+
+Returns data from a table for one or more matching conditions. This supports grouping of conditions to indicate order of operations as well.
+
+* operation _(required)_ - must always be `search_by_conditions`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+* offset _(optional)_ - the number of records that the query results will skip. The default is `0`
+* limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit
+* sort _(optional)_ - an object that indicates the sort order (see the sketch after this list). It has the following properties:
+  * attribute _(required)_ - the attribute to sort by
+  * descending _(optional)_ - if true, will sort in descending order (defaults to ascending order)
+  * next _(optional)_ - defines the next sort object, used to break ties when multiple records have the same value for the first attribute (follows the same structure as `sort`, and can recursively chain additional attributes)
+* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+* conditions _(required)_ - the array of condition objects, specified below, to filter by. Must include one or more objects in the array, each of which is either a condition or a grouped set of conditions. A condition has the following properties:
+  * search_attribute _(required)_ - the attribute you wish to search; can be any attribute
+  * search_type _(required)_ - the type of search to perform - `equals`, `not_equal`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between`
+  * search_value _(required)_ - case-sensitive value you wish to search. If the `search_type` is `between`, use an array of two values to search between
+
+  A grouped set of conditions has the following properties:
+  * operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+  * conditions _(required)_ - the array of condition objects as described above.
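+
+For example, sorting by `age` (descending) and breaking ties by `dog_name` chains one `next` object per additional attribute. A minimal sketch, using attribute names borrowed from the examples in this guide:
+
+```javascript
+// Chained sort: `age` first, ties broken by `dog_name`
+const sort = {
+	attribute: 'age',
+	descending: true,
+	next: {
+		attribute: 'dog_name', // ascending by default
+	},
+};
+```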
+### Body
+
+```json
+{
+	"operation": "search_by_conditions",
+	"database": "dev",
+	"table": "dog",
+	"operator": "and",
+	"offset": 0,
+	"limit": 10,
+	"sort": {
+		"attribute": "id",
+		"next": {
+			"attribute": "age",
+			"descending": true
+		}
+	},
+	"get_attributes": [
+		"*"
+	],
+	"conditions": [
+		{
+			"search_attribute": "age",
+			"search_type": "between",
+			"search_value": [
+				5,
+				8
+			]
+		},
+		{
+			"search_attribute": "weight_lbs",
+			"search_type": "greater_than",
+			"search_value": 40
+		},
+		{
+			"operator": "or",
+			"conditions": [
+				{
+					"search_attribute": "adorable",
+					"search_type": "equals",
+					"search_value": true
+				},
+				{
+					"search_attribute": "lovable",
+					"search_type": "equals",
+					"search_value": true
+				}
+			]
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"__createdtime__": 1620227719791,
+		"__updatedtime__": 1620227719791,
+		"adorable": true,
+		"age": 7,
+		"breed_id": 346,
+		"dog_name": "Harper",
+		"id": 2,
+		"owner_name": "Stephen",
+		"weight_lbs": 55
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 7,
+		"breed_id": 348,
+		"dog_name": "Alby",
+		"id": 3,
+		"owner_name": "Kaylan",
+		"weight_lbs": 84
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 6,
+		"breed_id": 347,
+		"dog_name": "Billy",
+		"id": 4,
+		"owner_name": "Zach",
+		"weight_lbs": 60
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 5,
+		"breed_id": 250,
+		"dog_name": "Gemma",
+		"id": 8,
+		"owner_name": "Stephen",
+		"weight_lbs": 55
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 8,
+		"breed_id": 104,
+		"dog_name": "Bode",
+		"id": 11,
+		"owner_name": "Margo",
+		"weight_lbs": 75
+	}
+]
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/quickstart-examples.md b/site/versioned_docs/version-4.4/developers/operations-api/quickstart-examples.md
new file mode 100644
index 00000000..9159efca
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/quickstart-examples.md
@@ -0,0 +1,387 @@
+---
+title: Quick Start Examples
+---
+
+# Quick Start Examples
+
+Harper recommends utilizing [Harper Applications](../../developers/applications/) for defining databases, tables, and other functionality. However, this guide is a great way to get started with the Harper Operations API.
+
+## Create dog Table
+
+We first need to create a table. Since our company is named after our CEO's dog, let's create a table to store all our employees' dogs. We'll call this table `dog`.
+
+Tables in Harper are schema-less, so we don't need to add any attributes other than a primary_key (in pre-4.2 versions this was referred to as the hash_attribute) to create this table.
+
+Harper does offer a `database` parameter that can be used to hold logical groupings of tables. The parameter is optional, and if not provided the operation will default to using a database named `data`.
+
+If you receive an error response, make sure your Basic Authentication user and password match those you entered during the installation process.
+
+### Body
+
+```json
+{
+	"operation": "create_table",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "table 'data.dog' successfully created."
+}
+```
+
+---
+
+## Create breed Table
+
+Now that we have a table to store our dog data, we also want to create a table to track known breeds.
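+
+As an aside, every request in this guide—including the `create_table` call above and those that follow—is a `POST` of a JSON body to the Operations API, so any HTTP client works. A minimal Node.js sketch, assuming a local instance on the default operations port 9925 and hypothetical Basic Authentication credentials chosen at installation:
+
+```javascript
+// Post any operations-API body to a local Harper instance
+async function operation(body) {
+	const response = await fetch('http://localhost:9925', {
+		method: 'POST',
+		headers: {
+			'Content-Type': 'application/json',
+			// Basic auth with the username/password set during installation (placeholder values here)
+			'Authorization': 'Basic ' + Buffer.from('HDB_ADMIN:password').toString('base64'),
+		},
+		body: JSON.stringify(body),
+	});
+	return response.json();
+}
+
+operation({ operation: 'user_info' }).then(console.log); // quick smoke test
+```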
Just as with the dog table, the only attribute we need to specify is the `primary_key`. + +### Body + +```json +{ + "operation": "create_table", + "table": "breed", + "primary_key": "id" +} +``` + +### Response: 200 + +```json +{ + "message": "table 'data.breed' successfully created." +} +``` + +--- + +## Insert 1 Dog + +We're ready to add some dog data. Penny is our CTO's pup, so she gets ID 1 or we're all fired. We are specifying attributes in this call, but this doesn't prevent us from specifying additional attributes in subsequent calls. + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Insert Multiple Dogs + +Let's add some more Harper doggies! We can add as many dog objects as we want into the records collection. If you're adding a lot of objects, we would recommend using the .csv upload option (see the next section where we populate the breed table). + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 12 of 12 records", + "inserted_hashes": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "skipped_hashes": [] +} +``` + +--- + +## Bulk Insert Breeds Via CSV + +We need to populate the 'breed' table with some data so we can reference it later. For larger data sets, we recommend using our CSV upload option. + +Each header in a column will be considered as an attribute, and each row in the file will be a row in the table. Simply specify the file path and the table to upload to, and Harper will take care of the rest. 
You can pull the breeds.csv file from here: https://s3.amazonaws.com/complimentarydata/breeds.csv
+
+### Body
+
+```json
+{
+	"operation": "csv_url_load",
+	"table": "breed",
+	"csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369",
+	"job_id": "e77d63b9-70d5-499c-960f-6736718a4369"
+}
+```
+
+---
+
+## Update 1 Dog Using NoSQL
+
+Harper supports NoSQL and SQL commands. We're going to update the dog table to show Penny's last initial using our NoSQL API.
+
+### Body
+
+```json
+{
+	"operation": "update",
+	"table": "dog",
+	"records": [
+		{
+			"id": 1,
+			"dog_name": "Penny B"
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "updated 1 of 1 records",
+	"update_hashes": [
+		1
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Select a Dog by ID Using SQL
+
+Now we're going to use a simple SQL SELECT call to pull Penny's updated data. Note we now see Penny's last initial in the dog name.
+
+### Body
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT * FROM data.dog where id = 1"
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"owner_name": "Kyle",
+		"adorable": null,
+		"breed_id": 154,
+		"__updatedtime__": 1610749428575,
+		"dog_name": "Penny B",
+		"weight_lbs": 38,
+		"id": 1,
+		"age": 7,
+		"__createdtime__": 1610749386566
+	}
+]
+```
+
+---
+
+## Select Dogs and Join Breed
+
+Here's a more complex SQL command joining the breed table with the dog table. We will also pull only the pups belonging to Kyle, Zach, and Stephen.
+
+### Body
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name"
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"id": 4,
+		"dog_name": "Billy",
+		"owner_name": "Zach",
+		"name": "LABRADOR / GREAT DANE MIX",
+		"section": "Mutt"
+	},
+	{
+		"id": 8,
+		"dog_name": "Gemma",
+		"owner_name": "Stephen",
+		"name": "SHORT HAIRED SETTER MIX",
+		"section": "Mutt"
+	},
+	{
+		"id": 2,
+		"dog_name": "Harper",
+		"owner_name": "Stephen",
+		"name": "HUSKY MIX",
+		"section": "Mutt"
+	},
+	{
+		"id": 5,
+		"dog_name": "Rose Merry",
+		"owner_name": "Zach",
+		"name": "TERRIER MIX",
+		"section": "Mutt"
+	}
+]
+```
diff --git a/site/versioned_docs/version-4.4/developers/operations-api/registration.md b/site/versioned_docs/version-4.4/developers/operations-api/registration.md
new file mode 100644
index 00000000..366b0189
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/operations-api/registration.md
@@ -0,0 +1,67 @@
+---
+title: Registration
+---
+
+# Registration
+
+## Registration Info
+Returns the registration data of the Harper instance.
+
+* operation _(required)_ - must always be `registration_info`
+
+### Body
+```json
+{
+	"operation": "registration_info"
+}
+```
+
+### Response: 200
+```json
+{
+	"registered": true,
+	"version": "4.2.0",
+	"ram_allocation": 2048,
+	"license_expiration_date": "2022-01-15"
+}
+```
+
+---
+
+## Get Fingerprint
+Returns the Harper fingerprint, uniquely generated based on the machine, for licensing purposes.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_fingerprint`
+
+### Body
+
+```json
+{
+	"operation": "get_fingerprint"
+}
+```
+
+---
+
+## Set License
+Sets the Harper license as generated by Harper License Management software.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_license` +* key _(required)_ - your license key +* company _(required)_ - the company that was used in the license + +### Body + +```json +{ + "operation": "set_license", + "key": "", + "company": "" +} +``` + diff --git a/site/versioned_docs/version-4.4/developers/operations-api/sql-operations.md b/site/versioned_docs/version-4.4/developers/operations-api/sql-operations.md new file mode 100644 index 00000000..9fcc6fb4 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/sql-operations.md @@ -0,0 +1,122 @@ +--- +title: SQL Operations +--- + +:::warning +Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL Operations + +## Select +Executes the provided SQL statement. The SELECT statement is used to query data from the database. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT * FROM dev.dog WHERE id = 1" +} +``` + +### Response: 200 +```json +[ + { + "id": 1, + "age": 7, + "dog_name": "Penny", + "weight_lbs": 38, + "breed_id": 154, + "owner_name": "Kyle", + "adorable": true, + "__createdtime__": 1611614106043, + "__updatedtime__": 1611614119507 + } +] +``` + +--- + +## Insert +Executes the provided SQL statement. The INSERT statement is used to add one or more rows to a database table. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "INSERT INTO dev.dog (id, dog_name) VALUE (22, 'Simon')" +} +``` + +### Response: 200 +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [ + 22 + ], + "skipped_hashes": [] +} +``` +--- + +## Update +Executes the provided SQL statement. The UPDATE statement is used to change the values of specified attributes in one or more rows in a database table. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body +```json +{ + "operation": "sql", + "sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1" +} +``` + +### Response: 200 +```json +{ + "message": "updated 1 of 1 records", + "update_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Delete +Executes the provided SQL statement. The DELETE statement is used to remove one or more rows of data from a database table. + +* operation _(required)_ - must always be `sql` +* sql _(required)_ - use standard SQL + +### Body +```json +{ + "operation": "sql", + "sql": "DELETE FROM dev.dog WHERE id = 1" +} +``` + +### Response: 200 +```json +{ + "message": "1 of 1 record successfully deleted", + "deleted_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` diff --git a/site/versioned_docs/version-4.4/developers/operations-api/token-authentication.md b/site/versioned_docs/version-4.4/developers/operations-api/token-authentication.md new file mode 100644 index 00000000..161c69b5 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/token-authentication.md @@ -0,0 +1,54 @@ +--- +title: Token Authentication +--- + +# Token Authentication + +## Create Authentication Tokens +Creates the tokens needed for authentication: operation & refresh token. 
+ +_Note - this operation does not require authorization to be set_ + +* operation _(required)_ - must always be `create_authentication_tokens` +* username _(required)_ - username of user to generate tokens for +* password _(required)_ - password of user to generate tokens for + +### Body +```json +{ + "operation": "create_authentication_tokens", + "username": "", + "password": "" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" +} +``` + +--- + +## Refresh Operation Token +This operation creates a new operation token. 
+ +* operation _(required)_ - must always be `refresh_operation_token` +* refresh_token _(required)_ - the refresh token that was provided when tokens were created + +### Body +```json +{ + "operation": "refresh_operation_token", + "refresh_token": "EXISTING_REFRESH_TOKEN" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-sEUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg" +} +``` diff --git a/site/versioned_docs/version-4.4/developers/operations-api/users-and-roles.md b/site/versioned_docs/version-4.4/developers/operations-api/users-and-roles.md new file mode 100644 index 00000000..d95f3ad9 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/users-and-roles.md @@ -0,0 +1,484 @@ +--- +title: Users and Roles +--- + +# Users and Roles + +## List Roles +Returns a list of all roles. 
[Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `list_roles`
+
+### Body
+```json
+{
+	"operation": "list_roles"
+}
+```
+
+### Response: 200
+```json
+[
+	{
+		"__createdtime__": 1611615061106,
+		"__updatedtime__": 1611615061106,
+		"id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890",
+		"permission": {
+			"super_user": false,
+			"dev": {
+				"tables": {
+					"dog": {
+						"read": true,
+						"insert": true,
+						"update": true,
+						"delete": false,
+						"attribute_permissions": [
+							{
+								"attribute_name": "name",
+								"read": true,
+								"insert": true,
+								"update": true
+							}
+						]
+					}
+				}
+			}
+		},
+		"role": "developer"
+	},
+	{
+		"__createdtime__": 1610749235614,
+		"__updatedtime__": 1610749235614,
+		"id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564",
+		"permission": {
+			"cluster_user": true
+		},
+		"role": "cluster_user"
+	},
+	{
+		"__createdtime__": 1610749235609,
+		"__updatedtime__": 1610749235609,
+		"id": "745b3138-a7cf-455a-8256-ac03722eef12",
+		"permission": {
+			"super_user": true
+		},
+		"role": "super_user"
+	}
+]
+```
+
+---
+
+## Add Role
+Creates a new role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_role`
+* role _(required)_ - name of role you are defining
+* permission _(required)_ - object defining permissions for users associated with this role:
+  * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, the value will be assumed to be false.
+  * structure_user _(optional)_ - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true.
+
+### Body
+```json
+{
+	"operation": "add_role",
+	"role": "developer",
+	"permission": {
+		"super_user": false,
+		"structure_user": false,
+		"dev": {
+			"tables": {
+				"dog": {
+					"read": true,
+					"insert": true,
+					"update": true,
+					"delete": false,
+					"attribute_permissions": [
+						{
+							"attribute_name": "name",
+							"read": true,
+							"insert": true,
+							"update": true
+						}
+					]
+				}
+			}
+		}
+	}
+}
+```
+
+### Response: 200
+```json
+{
+	"role": "developer",
+	"permission": {
+		"super_user": false,
+		"structure_user": false,
+		"dev": {
+			"tables": {
+				"dog": {
+					"read": true,
+					"insert": true,
+					"update": true,
+					"delete": false,
+					"attribute_permissions": [
+						{
+							"attribute_name": "name",
+							"read": true,
+							"insert": true,
+							"update": true
+						}
+					]
+				}
+			}
+		}
+	},
+	"id": "0a9368b0-bd81-482f-9f5a-8722e3582f96",
+	"__updatedtime__": 1598549532897,
+	"__createdtime__": 1598549532897
+}
+```
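+
+In practice, a role is created once and then referenced by name as users are added with the `add_user` operation described later in this document. A sketch of that flow, assuming an `operation()` helper like the one sketched in the Quick Start Examples and hypothetical role/user names:
+
+```javascript
+// Create a role, then a user that references it by name
+await operation({
+	operation: 'add_role',
+	role: 'developer',
+	permission: { super_user: false },
+});
+await operation({
+	operation: 'add_user',
+	role: 'developer',
+	username: 'dev_user',
+	password: 'a-strong-password',
+	active: true,
+});
+```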
+
+---
+
+## Alter Role
+Modifies the permissions of an existing role. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `alter_role`
+* id _(required)_ - the id value for the role you are altering
+* role _(optional)_ - name value to update on the role you are altering
+* permission _(required)_ - object defining permissions for users associated with this role:
+  * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, the value will be assumed to be false.
+  * structure_user _(optional)_ - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true.
+
+### Body
+
+```json
+{
+	"operation": "alter_role",
+	"id": "f92162e2-cd17-450c-aae0-372a76859038",
+	"role": "another_developer",
+	"permission": {
+		"super_user": false,
+		"structure_user": false,
+		"dev": {
+			"tables": {
+				"dog": {
+					"read": true,
+					"insert": true,
+					"update": true,
+					"delete": false,
+					"attribute_permissions": [
+						{
+							"attribute_name": "name",
+							"read": false,
+							"insert": true,
+							"update": true
+						}
+					]
+				}
+			}
+		}
+	}
+}
+```
+
+### Response: 200
+```json
+{
+	"id": "f92162e2-cd17-450c-aae0-372a76859038",
+	"role": "another_developer",
+	"permission": {
+		"super_user": false,
+		"structure_user": false,
+		"dev": {
+			"tables": {
+				"dog": {
+					"read": true,
+					"insert": true,
+					"update": true,
+					"delete": false,
+					"attribute_permissions": [
+						{
+							"attribute_name": "name",
+							"read": false,
+							"insert": true,
+							"update": true
+						}
+					]
+				}
+			}
+		}
+	},
+	"__updatedtime__": 1598549996106
+}
+```
+
+---
+
+## Drop Role
+Deletes an existing role from the database. Note: a role with associated users cannot be dropped. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this must always be `drop_role`
+* id _(required)_ - this is the id of the role you are dropping
+
+### Body
+```json
+{
+	"operation": "drop_role",
+	"id": "developer"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "developer successfully deleted"
+}
+```
+
+---
+
+## List Users
+Returns a list of all users.
[Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `list_users` + +### Body +```json +{ + "operation": "list_users" +} +``` + +### Response: 200 +```json +[ + { + "__createdtime__": 1635520961165, + "__updatedtime__": 1635520961165, + "active": true, + "role": { + "__createdtime__": 1635520961161, + "__updatedtime__": 1635520961161, + "id": "7c78ef13-c1f3-4063-8ea3-725127a78279", + "permission": { + "super_user": true, + "system": { + "tables": { + "hdb_table": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_attribute": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_schema": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_user": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_role": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_job": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_license": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_info": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_nodes": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_temp": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + } + } + } + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" + } +] +``` + +--- + +## User Info +Returns user data for the associated user credentials. + +* operation _(required)_ - must always be `user_info` + +### Body +```json +{ + "operation": "user_info" +} +``` + +### Response: 200 +```json +{ + "__createdtime__": 1610749235611, + "__updatedtime__": 1610749235611, + "active": true, + "role": { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" +} +``` + +--- + +## Add User +Creates a new user with the specified role and credentials. [Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_user` +* role _(required)_ - 'role' name value of the role you wish to assign to the user. See `add_role` for more detail +* username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash +* password _(required)_ - clear text for password. Harper will encrypt the password upon receipt +* active _(required)_ - boolean value for status of user's access to your Harper instance. If set to false, user will not be able to access your instance of Harper. + +### Body +```json +{ + "operation": "add_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 +```json +{ + "message": "hdb_user successfully added" +} +``` + +--- + +## Alter User +Modifies an existing user's role and/or credentials. 
[Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super\_user roles only_ + + * operation _(required)_ - must always be `alter_user` + * username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash. + * password _(optional)_ - clear text for password. Harper will encrypt the password upon receipt + * role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail + * active _(optional)_ - status of user's access to your Harper instance. See `add_role` for more detail + +### Body +```json +{ + "operation": "alter_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 +```json +{ + "message": "updated 1 of 1 records", + "new_attributes": [], + "txn_time": 1611615114397.988, + "update_hashes": [ + "hdb_user" + ], + "skipped_hashes": [] +} +``` + +--- + +## Drop User +Deletes an existing user by username. [Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `drop_user` +* username _(required)_ - username assigned to the user + +### Body +```json +{ + "operation": "drop_user", + "username": "sgoldberg" +} +``` + +### Response: 200 +```json +{ + "message": "sgoldberg successfully deleted" +} +``` diff --git a/site/versioned_docs/version-4.4/developers/operations-api/utilities.md b/site/versioned_docs/version-4.4/developers/operations-api/utilities.md new file mode 100644 index 00000000..4d09f5cd --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/operations-api/utilities.md @@ -0,0 +1,442 @@ +--- +title: Utilities +--- + +# Utilities + +## Restart +Restarts the Harper instance. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `restart` + +### Body +```json +{ + "operation": "restart" +} +``` + +### Response: 200 +```json +{ + "message": "Restarting HarperDB. This may take up to 60 seconds." +} +``` +--- + +## Restart Service +Restarts servers for the specified Harper service. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `restart_service` +* service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering` +* replicated _(optional)_ - must be a boolean. If set to `true`, Harper will replicate the restart service operation across all nodes in the cluster. The restart will occur as a rolling restart, ensuring that each node is fully restarted before the next node begins restarting. + +### Body +```json +{ + "operation": "restart_service", + "service": "http_workers" +} +``` + +### Response: 200 +```json +{ + "message": "Restarting http_workers" +} +``` + +--- +## System Information +Returns detailed metrics on the host system. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `system_information` +* attributes _(optional)_ - string array of top level attributes desired in the response, if no value is supplied all attributes will be returned. 
Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'metrics', 'threads', 'replication']
+
+### Body
+```json
+{
+	"operation": "system_information"
+}
+```
+
+---
+
+## Delete Records Before
+
+Deletes data older than the specified timestamp from the specified database table, exclusively on the node where the operation is executed. Any clustered nodes with replicated data will retain that data.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `delete_records_before`
+* date _(required)_ - records older than this date will be deleted. Supported format looks like: `YYYY-MM-DDThh:mm:ss.sZ`
+* schema _(required)_ - name of the schema where you are deleting your data
+* table _(required)_ - name of the table where you are deleting your data
+
+### Body
+```json
+{
+	"operation": "delete_records_before",
+	"date": "2021-01-25T23:05:27.464",
+	"schema": "dev",
+	"table": "breed"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373",
+	"job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373"
+}
+```
+
+---
+
+## Export Local
+Exports data based on a given search operation to a local file in JSON or CSV format.
+
+* operation _(required)_ - must always be `export_local`
+* format _(required)_ - the format you wish to export the data in; options are `json` & `csv`
+* path _(required)_ - path local to the server where the data will be exported
+* search_operation _(required)_ - a search operation: one of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql`
+* filename _(optional)_ - the name of the file your export will be written to (do not include an extension in the filename). If one is not provided, it will be autogenerated based on the epoch.
+
+### Body
+```json
+{
+	"operation": "export_local",
+	"format": "json",
+	"path": "/data/",
+	"search_operation": {
+		"operation": "sql",
+		"sql": "SELECT * FROM dev.breed"
+	}
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5"
+}
+```
+
+---
+
+## Export To S3
+Exports data based on a given search operation from a table to AWS S3 in JSON or CSV format.
+
+* operation _(required)_ - must always be `export_to_s3`
+* format _(required)_ - the format you wish to export the data in; options are `json` & `csv`
+* s3 _(required)_ - details your access keys, bucket, bucket region and key for saving the data to S3
+* search_operation _(required)_ - a search operation: one of `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql`
+
+### Body
+```json
+{
+	"operation": "export_to_s3",
+	"format": "json",
+	"s3": {
+		"aws_access_key_id": "YOUR_KEY",
+		"aws_secret_access_key": "YOUR_SECRET_KEY",
+		"bucket": "BUCKET_NAME",
+		"key": "OBJECT_NAME",
+		"region": "BUCKET_REGION"
+	},
+	"search_operation": {
+		"operation": "sql",
+		"sql": "SELECT * FROM dev.dog"
+	}
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a",
+	"job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a"
+}
+```
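+
+Operations like the exports above (and Delete Records Before) return immediately with a job id rather than blocking until the work finishes. A polling sketch, assuming the `get_job` operation from the jobs documentation, an `operation()` helper like the one in the Quick Start Examples, and an assumed response shape and status values:
+
+```javascript
+// Poll a background job until it completes
+async function waitForJob(jobId) {
+	for (;;) {
+		const [job] = await operation({ operation: 'get_job', id: jobId }); // assumed: returns an array
+		if (job.status === 'COMPLETE') return job; // status values are assumptions
+		if (job.status === 'ERROR') throw new Error(job.message);
+		await new Promise((resolve) => setTimeout(resolve, 1000)); // re-check once a second
+	}
+}
+```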
+
+---
+
+## Install Node Modules
+This operation is deprecated, as it is handled automatically by `deploy_component` and `restart`.
+Executes npm install against specified custom function projects.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `install_node_modules`
+* projects _(required)_ - must be an array of custom function projects
+* dry_run _(optional)_ - refers to the npm `--dry-run` flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false.
+
+### Body
+```json
+{
+	"operation": "install_node_modules",
+	"projects": [
+		"dogs",
+		"cats"
+	],
+	"dry_run": true
+}
+```
+
+---
+
+## Set Configuration
+
+Modifies the Harper configuration file parameters. You must follow this with a `restart` or `restart_service` operation for the changes to take effect.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_configuration`
+* logging_level _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+* clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+
+### Body
+```json
+{
+	"operation": "set_configuration",
+	"logging_level": "trace",
+	"clustering_enabled": true
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "Configuration successfully set. You must restart HarperDB for new config settings to take effect."
+}
+```
+
+---
+
+## Get Configuration
+Returns the Harper configuration parameters.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_configuration`
+
+### Body
+```json
+{
+	"operation": "get_configuration"
+}
+```
+
+### Response: 200
+```json
+{
+	"http": {
+		"compressionThreshold": 1200,
+		"cors": false,
+		"corsAccessList": [
+			null
+		],
+		"keepAliveTimeout": 30000,
+		"port": 9926,
+		"securePort": null,
+		"timeout": 120000
+	},
+	"threads": 11,
+	"authentication": {
+		"cacheTTL": 30000,
+		"enableSessions": true,
+		"operationTokenTimeout": "1d",
+		"refreshTokenTimeout": "30d"
+	},
+	"analytics": {
+		"aggregatePeriod": 60
+	},
+	"replication": {
+		"hostname": "node1",
+		"databases": "*",
+		"routes": null,
+		"url": "wss://127.0.0.1:9925"
+	},
+	"componentsRoot": "/Users/hdb/components",
+	"localStudio": {
+		"enabled": false
+	},
+	"logging": {
+		"auditAuthEvents": {
+			"logFailed": false,
+			"logSuccessful": false
+		},
+		"auditLog": true,
+		"auditRetention": "3d",
+		"file": true,
+		"level": "error",
+		"root": "/Users/hdb/log",
+		"rotation": {
+			"enabled": false,
+			"compress": false,
+			"interval": null,
+			"maxSize": null,
+			"path": "/Users/hdb/log"
+		},
+		"stdStreams": false
+	},
+	"mqtt": {
+		"network": {
+			"port": 1883,
+			"securePort": 8883
+		},
+		"webSocket": true,
+		"requireAuthentication": true
+	},
+	"operationsApi": {
+		"network": {
+			"cors": true,
+			"corsAccessList": [
+				"*"
+			],
+			"domainSocket": "/Users/hdb/operations-server",
+			"port": 9925,
+			"securePort": null
+		}
+	},
+	"rootPath": "/Users/hdb",
+	"storage": {
+		"writeAsync": false,
+		"caching": true,
+		"compression": false,
+		"noReadAhead": true,
+		"path": "/Users/hdb/database",
+		"prefetchWrites": true
+	},
+	"tls": {
+		"privateKey": "/Users/hdb/keys/privateKey.pem"
+	}
+}
+```
+
+---
+
+## Add Certificate
+
+Adds or updates a certificate in the `hdb_certificate` system table.
+If a `private_key` is provided, it will __not__ be stored in `hdb_certificate`; it will be written to a file in `/keys/`.
+If a `private_key` is not passed, the operation will search for one that matches the certificate. If one is not found, an error will be returned.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_certificate` +* name _(required)_ - a unique name for the certificate +* certificate _(required)_ - a PEM formatted certificate string +* is_authority _(required)_ - a boolean indicating if the certificate is a certificate authority +* hosts _(optional)_ - an array of hostnames that the certificate is valid for +* private_key _(optional)_ - a PEM formatted private key string + +### Body +```json +{ + "operation": "add_certificate", + "name": "my-cert", + "certificate": "-----BEGIN CERTIFICATE-----ZDFAay... -----END CERTIFICATE-----", + "is_authority": false, + "private_key": "-----BEGIN RSA PRIVATE KEY-----Y4dMpw5f... -----END RSA PRIVATE KEY-----" +} +``` + +### Response: 200 +```json +{ + "message": "Successfully added certificate: my-cert" +} +``` + +--- + +## Remove Certificate + +Removes a certificate from the `hdb_certificate` system table and deletes the corresponding private key file. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `remove_certificate` +* name _(required)_ - the name of the certificate + +### Body +```json +{ + "operation": "remove_certificate", + "name": "my-cert" +} +``` + +### Response: 200 +```json +{ + "message": "Successfully removed my-cert" +} +``` + +--- + +## List Certificates + +Lists all certificates in the `hdb_certificate` system table. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `list_certificates` + +### Body +```json +{ + "operation": "list_certificates" +} +``` + +### Response: 200 +```json +[ + { + "name": "HarperDB-Certificate-Authority-node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\nTANBgkqhk... S34==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": true, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "serial_number": "5235345", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": [ + "https", + "wss" + ] + }, + { + "name": "node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\ngIEcSR1M... 5bv==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": false, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=node.1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject_alt_name": "IP Address:127.0.0.1, DNS:localhost, IP Address:0:0:0:0:0:0:0:1, DNS:node.1", + "serial_number": "5243646", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": [ + "https", + "wss" + ] + } +] +``` diff --git a/site/versioned_docs/version-4.4/developers/real-time.md b/site/versioned_docs/version-4.4/developers/real-time.md new file mode 100644 index 00000000..5f90e075 --- /dev/null +++ b/site/versioned_docs/version-4.4/developers/real-time.md @@ -0,0 +1,175 @@ +--- +title: Real-Time +--- + +# Real-Time + +## Real-Time + +Harper provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time as well as handling data-oriented messaging. 
Harper supports multiple standardized protocols to facilitate diverse, standards-based client interaction.
+
+Harper real-time communication is based around database tables. Declared tables are the basis for monitoring data, and for defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a Harper application folder](./applications/):
+```
+type MyTopic @table @export
+```
+You can then subscribe to records or sub-topics in this topic/namespace, as well as save data and publish messages, with the protocols discussed below.
+
+### Content Negotiation
+
+Harper is a database, not a generic broker, and is therefore highly adept at handling _structured_ data. Data can be published and subscribed in all supported structured/object formats, including JSON, CBOR, and MessagePack, and the data will be stored and handled as structured data. This means that different clients can individually choose which format they prefer, both for inbound and outbound messages. One client could publish in JSON, and another client could choose to receive messages in CBOR.
+
+## Protocols
+
+### MQTT
+
+Harper supports MQTT as an interface to this real-time data delivery. It is important to note that MQTT in Harper is not just a generic pub/sub hub, but is deeply integrated with the database, providing subscriptions directly to database records and publishing to those records. In this document we will explain how MQTT pub/sub concepts are aligned and integrated with database functionality.
+
+#### Configuration
+
+Harper supports MQTT through its `mqtt` server module, over standard TCP sockets or over WebSockets. This is enabled by default, but can be configured in your `harperdb-config.yaml` configuration, allowing you to change which ports it listens on, whether secure TLS connections are used, and whether MQTT is accepted over WebSockets:
+
+```yaml
+mqtt:
+  network:
+    port: 1883
+    securePort: 8883 # for TLS
+  webSocket: true # will also enable WS support through the default HTTP interface/port
+  mTLS: false
+  requireAuthentication: true
+```
+
+Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`. mTLS is also supported by enabling it in the configuration and using the certificate authority from the TLS section of the configuration. See the [configuration documentation for more information](../deployments/configuration).
+
+#### Capabilities
+
+Harper's MQTT capabilities include support for MQTT versions v3.1 and v5, with standard publish and subscription capabilities, multi-level topics, QoS 0 and 1 levels, and durable (non-clean) sessions. Harper supports QoS 2 interaction, but doesn't guarantee exactly-once delivery (although any guarantee of exactly-once delivery over unstable networks is a fictional aspiration). It doesn't currently support last will, nor single-level wildcards (only multi-level wildcards).
+
+### Topics
+
+In MQTT, messages are published to, and subscribed from, topics. In Harper, topics are aligned with resource endpoint paths in exactly the same way as the REST endpoints. If you define a table or resource in your schema with a path/endpoint of "my-resource", it can be addressed as a topic just like a URL path.
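+
+For example, any conformant MQTT client can connect and subscribe to a record's topic. A sketch using the `mqtt` npm package (a hypothetical client choice, not something bundled with Harper) against a local instance with placeholder credentials:
+
+```javascript
+import mqtt from 'mqtt';
+
+const client = mqtt.connect('mqtt://localhost:1883', {
+	username: 'HDB_ADMIN', // requireAuthentication is enabled by default
+	password: 'password', // placeholder credentials
+});
+client.on('connect', () => {
+	client.subscribe('my-resource/some-id');
+});
+client.on('message', (topic, payload) => {
+	// the current record arrives first, as the retained message
+	console.log(topic, JSON.parse(payload.toString()));
+});
+```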
+
+So a topic of "my-resource/some-id" corresponds to the record in the my-resource table (or custom resource) with a record id of "some-id".
+
+This means that you can subscribe to "my-resource/some-id", and this subscription will receive notification messages for any updates to this record. If this record is modified or deleted, a message will be sent to listeners of this subscription.
+
+The current value of this record is also treated as the "retained" message for this topic. When you subscribe to "my-resource/some-id", you will immediately receive the record for this id, through a "publish" command from the server, as the initial "retained" message that is first delivered. This provides a simple and effective way to get the current state of a record, and future updates to that record, without having to worry about the timing issues of aligning a separate retrieval and subscription.
+
+Similarly, publishing a message to a "topic" also interacts with the database. Publishing a message with the "retain" flag enabled is interpreted as an update or put to that record. The published message will replace the current record with the contents of the published message.
+
+If a message is published without a `retain` flag, the message will not alter the record at all, but will still be published to any subscribers to that record.
+
+Harper supports QoS 0 and 1 for publishing and subscribing.
+
+Harper supports multi-level topics, both for subscribing and publishing. Harper also supports multi-level wildcards, so you can subscribe to `my-resource/#` to receive notifications for `my-resource/some-id` as well as `my-resource/nested/id`, or you can subscribe to `my-resource/nested/#` and receive the latter, but not the former, topic messages. Harper currently only supports trailing multi-level wildcards (no single-level `+` wildcards).
+
+#### Events
+JavaScript components can also listen for MQTT events, available on the `server.mqtt.events` object. For example, to set up a listener/callback for when MQTT clients connect and authorize, we can do:
+
+```javascript
+server.mqtt.events.on('connected', (session, socket) => {
+	console.log('client connected with id', session.clientId);
+});
+```
+The following MQTT events are available:
+* `connection` - When a client initially establishes a TCP or WS connection to the server
+* `connected` - When a client establishes an authorized MQTT connection
+* `auth-failed` - When a client fails to authenticate
+* `disconnected` - When a client disconnects from the server
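+
+As a slightly fuller sketch (building on the listener above, and assuming the `disconnected` event receives the same `(session, socket)` arguments), the `connected` and `disconnected` events can be paired to track live sessions:
+
+```javascript
+let liveSessions = 0;
+server.mqtt.events.on('connected', (session) => {
+	liveSessions++;
+	console.log(`client ${session.clientId} connected; ${liveSessions} live sessions`);
+});
+server.mqtt.events.on('disconnected', (session) => {
+	liveSessions--;
+	console.log(`client ${session.clientId} disconnected; ${liveSessions} live sessions`);
+});
+```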
+
+### Ordering
+
+Harper is designed to be a distributed database, and an intrinsic characteristic of distributed servers is that messages may take different amounts of time to traverse the network, and may arrive in a different order depending on server location and network topology. Harper is designed for distributed data with minimal latency, so messages are delivered to subscribers immediately when they arrive. Harper does not delay messages to coordinate confirmation or consensus among other nodes, which would significantly increase latency; messages are delivered as quickly as possible.
+
+As an example, let's consider message #1, which is published to node A, which then sends the message to node B and node C, but the message takes a while to get there. Slightly later, while the first message is still in transit, message #2 is published to node B, which then replicates it to A and C, and because of network conditions, message #2 arrives at node C before message #1. Because Harper prioritizes low latency, when node C receives message #2, it immediately publishes it to all its local subscribers (it has no knowledge that message #1 is in transit).
+
+When message #1 is received by node C, what it does with the message depends on whether the message is a "retained" message (published with a retain flag set to true, or put/update/upsert/inserted into the database) or a non-retained message. In the case of a non-retained message, the message will be delivered to all local subscribers (even though it had been published earlier), thereby prioritizing the delivery of every message. On the other hand, a retained message that arrives out of order will not be delivered to clients; Harper will keep the message with the latest timestamp as the "winning" record state (and it will be the retained message for any subsequent subscriptions). Retained messages maintain (eventual) consistency across the entire cluster of servers: all nodes will converge on the same message as being the latest, retained message (#2 in this case).
+
+Non-retained messages are generally a good choice for applications like chat, where every message needs to be delivered even if some might arrive out of order (the order may not be consistent across all servers). Retained messages can be thought of as "superseding" messages, and are a good fit for applications like instrument measurements—temperature readings, say—where the priority is providing the _latest_ reading, older readings are not worth publishing after a newer one arrives, and consistency of the most recent record across the network is important.
+
+### WebSockets
+
+WebSockets are supported through the REST interface, and go through the `connect(incomingMessages)` method on resources. By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection with `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' with the resource id of 341 and connect to it. On the web platform this could be:
+
+```javascript
+let ws = new WebSocket('wss://server/my-resource/341');
+ws.onmessage = (event) => {
+	// received a notification from the server
+	let data = JSON.parse(event.data);
+};
+```
+
+By default, resources will make a subscription to that resource, monitoring any changes to the records or messages published to it, and will return events on the WebSockets connection. You can also override `connect(incomingMessages)` with your own handler. The `connect` method simply needs to return an iterable (or asynchronous iterable) that represents the stream of messages to be sent to the client. One easy way to create an iterable stream is to define the `connect` method as a generator and `yield` messages as they become available. For example, a simple WebSockets echo server for a resource could be written:
+
+```javascript
+export class Echo extends Resource {
+	async *connect(incomingMessages) {
+		for await (let message of incomingMessages) {
+			// wait for each incoming message from the client
+			// and send the message back to the client
+			yield message;
+		}
+	}
+}
+```
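+
+To exercise an endpoint like this from a browser, a quick sketch (assuming the `Echo` resource above is exported and therefore addressable at its own path, mirroring the URL pattern shown earlier):
+
+```javascript
+let echo = new WebSocket('wss://server/Echo/');
+echo.onopen = () => echo.send(JSON.stringify({ greeting: 'hello' }));
+echo.onmessage = (event) => {
+	// the generator above yields each message straight back to us
+	console.log('echoed:', event.data);
+};
+```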
The incoming messages iterable is also an event emitter, and you can listen for `data` events to get the incoming messages using event style:
+
+```javascript
+export class Example extends Resource {
+  connect(incomingMessages) {
+    let outgoingMessages = super.connect();
+    let timer = setInterval(() => {
+      outgoingMessages.send({greeting: 'hi again!'});
+    }, 1000); // send a message once a second
+    incomingMessages.on('data', (message) => {
+      // another way of echoing the data back to the client
+      outgoingMessages.send(message);
+    });
+    outgoingMessages.on('close', () => {
+      // make sure we end the timer once the connection is closed
+      clearInterval(timer);
+    });
+    return outgoingMessages;
+  }
+}
+```
+
+### Server Sent Events
+
+Server Sent Events (SSE) are also supported through the REST server interface, and provide a simple and efficient mechanism for web-based applications to receive real-time updates. For consistency of push delivery, SSE connections go through the `connect()` method on resources, much like WebSockets. The primary difference is that `connect` is called without any `incomingMessages` argument, since SSE is a one-directional transport mechanism. This can be used much like WebSockets: specifying a resource URL path will connect to that resource, and by default provides a stream of messages for changes and messages for that resource. For example, you can connect to receive notifications in a browser for a resource like:
+
+```javascript
+let eventSource = new EventSource('https://server/my-resource/341', { withCredentials: true });
+eventSource.onmessage = (event) => {
+  // received a notification from the server
+  let data = JSON.parse(event.data);
+};
+```
+
+### MQTT Feature Support Matrix
+
+| Feature | Support |
+| ------- | ------- |
+| Connections, protocol negotiation, and acknowledgement with v3.1.1 | :heavy_check_mark: |
+| Connections, protocol negotiation, and acknowledgement with v5 | :heavy_check_mark: |
+| Secure MQTTS | :heavy_check_mark: |
+| MQTTS over WebSockets | :heavy_check_mark: |
+| MQTT authentication via user/pass | :heavy_check_mark: |
+| MQTT authentication via mTLS | :heavy_check_mark: |
+| Publish | :heavy_check_mark: |
+| Subscribe | :heavy_check_mark: |
+| Multi-level wildcard | :heavy_check_mark: |
+| Single-level wildcard | :heavy_check_mark: |
+| QoS 0 | :heavy_check_mark: |
+| QoS 1 | :heavy_check_mark: |
+| QoS 2 | Not fully supported; can perform the conversation but does not persist |
+| Keep-Alive monitoring | :heavy_check_mark: |
+| Clean session | :heavy_check_mark: |
+| Durable session | :heavy_check_mark: |
+| Distributed durable session | |
+| Will | :heavy_check_mark: |
+| MQTT V5 User properties | |
+| MQTT V5 Will properties | |
+| MQTT V5 Connection properties | |
+| MQTT V5 Connection acknowledgement properties | |
+| MQTT V5 Publish properties | |
+| MQTT V5 Subscribe properties retain handling | :heavy_check_mark: |
+| MQTT V5 Subscribe properties | |
+| MQTT V5 Ack properties | |
+| MQTT V5 AUTH command | |
+| MQTT V5 Shared Subscriptions | |
diff --git a/site/versioned_docs/version-4.4/developers/replication/index.md b/site/versioned_docs/version-4.4/developers/replication/index.md
new file mode 100644
index 00000000..d84fe214
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/replication/index.md
@@ -0,0 +1,252 @@
+---
+title: Replication/Clustering
+---
+
+# Replication/Clustering
+
+Harper’s replication system is designed to make distributed data replication fast and reliable across multiple nodes. This means you can easily build a distributed database that ensures high availability, disaster recovery, and data localization. The best part? It’s simple to set up, configure, and manage. You can easily add or remove nodes, choose which data to replicate, and monitor the system’s health without jumping through hoops.
+
+### Replication Overview
+
+Harper replication uses a peer-to-peer model where every node in your cluster can send and subscribe to data. Each node connects through WebSockets, allowing data to flow seamlessly in both directions. By default, Harper takes care of managing these connections and subscriptions, so you don’t have to worry about data consistency. The system is designed to maintain secure, reliable connections between nodes, ensuring that your data is always safe.
+
+### Replication Configuration
+
+To connect your nodes, you need to provide hostnames or URLs for the nodes to connect to each other. This can be done via configuration or through operations. To configure replication, you can specify connection information in the `replication` section of the [harperdb-config.yaml](../../deployments/configuration). Here, you can specify the hostname of the current node, and routes to connect to other nodes, for example:
+
+```yaml
+replication:
+  hostname: server-one
+  routes:
+    - server-two
+    - server-three
+```
+
+In this example, the current node is `server-one`, and it will connect to `server-two` and `server-three`. Routes to other nodes can also be configured with URLs or ports:
+
+```yaml
+replication:
+  hostname: server-one
+  routes:
+    - wss://server-two:9925 # URL based route
+    - hostname: server-three # define a hostname and port
+      port: 9930
+```
+
+You can also use the [operations API](../operations-api/clustering) to dynamically add and remove nodes from the cluster. This is useful for adding new nodes to a running cluster or removing nodes that are no longer needed. For example (note this is the basic form; you would also need to provide the necessary credentials for the operation, see the section on securing connections for more details):
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two"
+}
+```
+
+These operations will also dynamically generate certificates as needed, if there are no existing signed certificates, or if the existing certificates are not valid for the new node.
+
+Harper will also automatically replicate node information to other nodes in a cluster ([gossip-style discovery](https://highscalability.com/gossip-protocol-explained/)). This means that you only need to connect to one node in an existing cluster, and Harper will automatically detect and connect to the other nodes in the cluster (bidirectionally).
+
+By default, Harper will replicate all the data in all the databases. You can configure which databases are replicated, and then override this behavior on a per-table basis. For example, you can indicate which databases should be replicated by default, here indicating you want to replicate the `data` and `system` databases:
+
+```yaml
+replication:
+  databases:
+    - data
+    - system
+```
+
+By default, all tables within a replicated database will be replicated. Transactions are replicated atomically, which may involve data across multiple tables.
+However, you can also configure replication for individual tables, disabling and excluding replication for specific tables in a database by setting `replicate` to `false` in the table definition:
+
+```graphql
+type LocalTableForNode @table(replicate: false) {
+  id: ID!
+  name: String!
+}
+```
+
+You can also control which nodes data is replicated to, and how many nodes data is replicated to. By default, Harper will replicate data to all nodes in the cluster, but you can control where data is replicated with the [sharding configuration and APIs](./sharding).
+
+By default, replication will connect on the operations API network interface/port (9925 by default). You can configure the replication port in the `replication` section. For example, to change the replication port to 9930:
+
+```yaml
+replication:
+  securePort: 9930
+```
+
+This will change the replication port to 9930, while the operations API will remain on its own separate port (9925).
+
+### Securing Connections
+
+Harper supports the highest levels of security through public key infrastructure (PKI) based security and authorization. Depending on your security configuration, you can configure Harper in several different ways to build a connected cluster.
+
+#### Provide your own certificates
+
+If you want to secure your Harper connections with your own signed certificates, you can easily do so. Whether you have certificates from a public authority (like Let's Encrypt or Digicert) or a corporate certificate authority, you can use them to authenticate nodes securely. You can then allow nodes to authorize each other by checking the certificate against the standard list of root certificate authorities by enabling the `enableRootCAs` option in the config:
+
+```yaml
+replication:
+  enableRootCAs: true
+```
+
+And then just make sure the certificate’s common name (CN) matches the node's hostname.
+
+#### Setting Up Custom Certificates
+
+There are two ways to configure Harper with your own certificates:
+
+1. Use the `add_certificate` operation to upload them.
+1. Or, specify the certificate paths directly in the `replication` section of the `harperdb-config.yaml` file.
+
+If your certificate is signed by a trusted public authority, just provide the path to the certificate and private key. If you're using self-signed certificates or a private certificate authority, you’ll also need to provide the certificate authority (CA) details to complete the setup.
+
+Example configuration:
+
+```yaml
+tls:
+  certificate: /path/to/certificate.pem
+  certificateAuthority: /path/to/ca.pem
+  privateKey: /path/to/privateKey.pem
+```
+
+With this in place, Harper will load the provided certificates into the certificate table and use these to secure and authenticate connections between nodes.
+
+You have the option to skip providing a specific certificate authority (CA) and instead verify your certificate against the root certificates included in the bundled Mozilla CA store. This bundled CA store, provided by Node.js, is a snapshot of Mozilla's CA certificates that is fixed at the time of each Node.js release.
+
+To enable the root certificates, set `replication.enableRootCAs` to `true` in the `harperdb-config.yaml` file:
+
+```yaml
+replication:
+  enableRootCAs: true
+```
+
+#### Cross-generated certificates
+
+Harper can also generate its own certificates for secure connections. This is useful for setting up secure connections between nodes when no existing certificates are available, and can be used in development, testing, or production environments.
+Certificates will be automatically requested and signed between nodes to support a form of distributed certificate generation and signing. To establish secure connections between nodes using cross-generated certificates, you simply use the [`add_node` operation](../operations-api/clustering) over SSL, and specify the temporary authentication credentials to use for connecting and authorizing the certificate generation and signing.
+
+Example configuration:
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "verify_tls": false,
+  "authorization": {
+    "username": "admin",
+    "password": "password"
+  }
+}
+```
+
+When you connect to another node (e.g., `server-two`), Harper uses secure WebSockets and the provided credentials to establish the connection.
+
+If you’re working with a fresh install, you’ll need to set `verify_tls` to `false` temporarily, so the self-signed certificate is accepted. Once the connection is made, Harper will automatically handle the certificate signing process:
+
+* It creates a certificate signing request (CSR), sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA).
+* The signed certificate is stored for future connections between the nodes, ensuring secure communication.
+
+**Important:** Your credentials are not stored; they are discarded immediately after use.
+
+You can also provide credentials in HTTP Authorization format (Basic auth, Token auth, or JWT). This is helpful for handling authentication with the required permissions to generate and sign certificates.
+
+Additionally, you can use `set_node` as an alias for the `add_node` operation if you prefer.
+
+#### Removing Nodes
+
+Nodes can be removed from the cluster using the [`remove_node` operation](../operations-api/clustering). This will remove the node from the cluster, and stop replication to and from the node. For example:
+
+```json
+{
+  "operation": "remove_node",
+  "hostname": "server-two"
+}
+```
+
+#### Insecure Connection IP-based Authentication
+
+You can completely disable secure connections and use IP addresses to authenticate nodes with each other. This can be useful for development and testing, or within a secure private network, but should never be used for production with publicly accessible servers. To disable secure connections, simply configure replication on an insecure port, either by [configuring the operations API](../../deployments/configuration) to run on an insecure port or by configuring replication itself to run on one, and then set up IP-based routes to connect to other nodes:
+
+```yaml
+replication:
+  port: 9930
+  routes:
+    - 127.0.0.2
+    - 127.0.0.3
+```
+
+Note that in this example, we are using loopback addresses, which can be a convenient way to run multiple nodes on a single machine for testing and development.
+
+#### Explicit Subscriptions
+
+By default, Harper automatically handles connections and subscriptions between nodes, ensuring data consistency across your cluster. It even uses data routing to manage node failures. But if you want more control, you can manage these connections manually by explicitly subscribing to nodes. This is useful for advanced configurations, testing, or debugging.
+
+#### Important Notes on Explicit Subscriptions
+
+If you choose to manage subscriptions manually, Harper will no longer handle data consistency for you. This means there’s no guarantee that all nodes will have consistent data if subscriptions don’t fully replicate in all directions. If a node goes down, it’s possible that some data wasn’t replicated before the failure.
+
+#### How to Subscribe to Nodes
+
+To explicitly subscribe to a node, you can use operations like `add_node` and define the subscriptions. For example, you can configure a node (e.g., `server-two`) to publish transactions on a specific table (e.g., `dev.my-table`) without receiving data from that node.
+
+Example configuration:
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "subscriptions": [{
+    "database": "dev",
+    "table": "my-table",
+    "publish": true,
+    "subscribe": false
+  }]
+}
+```
+
+To update an explicit subscription, you can use the [`update_node` operation](../operations-api/clustering). Here we are updating the subscription to receive transactions on the `dev.my-table` table from the `server-two` node:
+
+```json
+{
+  "operation": "update_node",
+  "hostname": "server-two",
+  "subscriptions": [{
+    "database": "dev",
+    "table": "my-table",
+    "publish": true,
+    "subscribe": true
+  }]
+}
+```
+
+#### Monitoring Replication
+
+You can monitor the status of replication through the operations API, using the [`cluster_status` operation](../operations-api/clustering). For example:
+
+```json
+{
+  "operation": "cluster_status"
+}
+```
+
+#### Database Initial Synchronization and Resynchronization
+
+When a new node is added to the cluster, if its database has not previously been synced, it will initially download the database from the first node it connects to. This will copy every record from the source database to the new node. Once the initial synchronization is complete, the new node will enter replication mode and receive records from each node as they are created, updated, or deleted. If a node goes down and comes back up, it will also resynchronize with the other nodes in the cluster, to ensure that it has the most up-to-date data.
+
+You may also specify a `start_time` in the `add_node` operation to indicate that when a database connects, it should not download the entire database, but only data since the given starting time.
+
+**Advanced Configuration**
+
+You can also check the configuration of the replication system, including the currently known nodes and certificates, by querying the `hdb_nodes` and `hdb_certificate` tables:
+
+```json
+{
+  "operation": "search_by_value",
+  "database": "system",
+  "table": "hdb_nodes",
+  "search_attribute": "name",
+  "search_value": "*"
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/replication/sharding.md b/site/versioned_docs/version-4.4/developers/replication/sharding.md
new file mode 100644
index 00000000..6aa1e1e4
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/replication/sharding.md
@@ -0,0 +1,99 @@
+---
+title: Sharding
+---
+
+Harper's replication system supports various levels of replication or sharding. Harper can be configured to replicate different data to different subsets of nodes. This can be used to facilitate horizontal scalability of storage and write performance, while maintaining optimal strategies for data locality and data consistency. When sharding is configured, Harper will replicate data to only a subset of nodes, based on the sharding configuration, and can then retrieve data from the appropriate nodes as needed to fulfill requests for data.
+
+## Configuration
+By default, Harper will replicate all data to all nodes. However, replication can easily be configured for "sharding", or storing different data in different locations or nodes. The simplest way to configure sharding and limit replication to improve performance and efficiency is to configure a replicate-to count. This will limit the number of nodes that data is replicated to. For example, to specify that writes should replicate to 2 other nodes besides the node that first stored the data, you can set `replicateTo` to 2 in the `replication` section of the `harperdb-config.yaml` file:
+```yaml
+replication:
+  replicateTo: 2
+```
+This will ensure that data is replicated to two other nodes, so that each record will be stored on three nodes in total.
+
+With a sharding configuration (or the customizations below) in place, requests for records that don't reside on the server handling the request will automatically be forwarded to the appropriate node. This is done transparently, so the client does not need to know where the data is stored.
+
+## Replication Control with Headers
+With the REST interface, replication levels and destinations can also be specified with the `X-Replicate-To` header. This can be used to indicate the number of additional nodes that data should be replicated to, or to specify the nodes that data should be replicated to. The `X-Replicate-To` header can be used with the `POST` and `PUT` methods. This header can also specify if the response should wait for confirmation from other nodes, and how many, with the `confirm` parameter. For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following header:
+```http
+PUT /MyTable/3
+X-Replicate-To: 2;confirm=1
+
+...
+```
+
+You can also explicitly specify destination nodes by providing a comma-separated list of node hostnames. For example, to specify that data should be replicated to nodes `node1` and `node2`, you can use the following header:
+```http
+PUT /MyTable/3
+X-Replicate-To: node1,node2
+```
+(This can also be used with the `confirm` parameter.)
+
+## Replication Control with Operations
+Likewise, you can specify `replicateTo` and `replicatedConfirmation` parameters in the operation object when using the Harper API. For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following operation object:
+```json
+{
+  "operation": "update",
+  "schema": "dev",
+  "table": "MyTable",
+  "hashValues": [3],
+  "record": {
+    "name": "John Doe"
+  },
+  "replicateTo": 2,
+  "replicatedConfirmation": 1
+}
+```
+or you can specify nodes:
+```json
+...,
+  "replicateTo": ["node-1", "node-2"]
+...
+```
+## Programmatic Replication Control
+Additionally, you can specify `replicateTo` and `replicatedConfirmation` parameters programmatically in the context of a resource. For example, you can define a put method:
+```javascript
+class MyTable extends tables.MyTable {
+  put(record) {
+    const context = this.getContext();
+    context.replicateTo = 2; // or an array of node names
+    context.replicatedConfirmation = 1;
+    return super.put(record);
+  }
+}
+```
+
+## Custom Sharding
+You can also define a custom sharding strategy by specifying a function to compute the "residency", or location, of where records should be stored and reside.
+To do this, we use the `setResidency` method, providing a function that will determine the residency of each record. The function you provide will be called with the record entry, and should return an array of nodes that the record should be replicated to (using their hostnames). For example, to shard records based on the value of the `id` field, you can use the following code:
+```javascript
+MyTable.setResidency((record) => {
+  return record.id % 2 === 0 ? ['node1'] : ['node2'];
+});
+```
+With this approach, the record metadata, which includes the residency information, and any indexed properties will be replicated to all nodes, but the full record will only be replicated to the nodes specified by the residency function.
+
+### Custom Sharding By Primary Key
+Alternatively, you can define a custom sharding strategy based on the primary key alone. This allows records to be retrieved without needing access to the record data or metadata. With this approach, data will only be replicated to the nodes specified by the residency function (the record metadata doesn't need to be replicated to all nodes). To do this, you can use the `setResidencyById` method, providing a function that will determine the residency of each record based on the primary key. The function you provide will be called with the primary key, and should return an array of nodes that the record should be replicated to (using their hostnames). For example, to shard records based on the value of the primary key, you can use the following code:
+
+```javascript
+MyTable.setResidencyById((id) => {
+  return id % 2 === 0 ? ['node1'] : ['node2'];
+});
+```
+
+### Disabling Cross-Node Access
+Normally sharding allows data to be stored on specific nodes, but still allows access to the data from any node. However, you can also disable cross-node access so that data is only returned if it is stored on the node where it is accessed. To do this, you can set the `replicateFrom` property on the context of the operation to `false`:
+```json
+{
+  "operation": "search_by_id",
+  "table": "MyTable",
+  "ids": [3],
+  "replicateFrom": false
+}
+```
+Or use a header with the REST API:
+```http
+GET /MyTable/3
+X-Replicate-From: none
+```
diff --git a/site/versioned_docs/version-4.4/developers/rest.md b/site/versioned_docs/version-4.4/developers/rest.md
new file mode 100644
index 00000000..882f975a
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/rest.md
@@ -0,0 +1,404 @@
+---
+title: REST
+---
+
+# REST
+
+Harper provides a powerful, efficient, and standard-compliant HTTP REST interface for interacting with tables and other resources. The REST interface is the recommended interface for data access, querying, and manipulation (for HTTP interactions), providing the best performance and HTTP interoperability with different clients.
+
+Resources, including tables, can be configured as RESTful endpoints. Make sure you review the [application introduction](./applications/) and [defining schemas](./applications/defining-schemas) to properly define your schemas and select which tables are exported and available through the REST interface, as tables are not exported by default. The name of the [exported](./applications/defining-schemas#export) resource defines the basis of the endpoint path available at the application HTTP server port [configured here](../deployments/configuration#http) (the default being `9926`). From there, a record id or query can be appended.
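+
+For example (a minimal sketch; the table name is hypothetical), exporting a table in a schema makes it available as a REST endpoint:
+
+```graphql
+# Hypothetical table; @export makes it available over REST
+type Dog @table @export {
+  id: ID @primaryKey
+  name: String @indexed
+}
+```
+
+With this in place, the table can be reached on the application HTTP port, e.g. `GET /Dog/some-id` to retrieve a record by its id.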
+
+Following uniform interface principles, HTTP methods define different actions with resources. For each method, this document describes the default action.
+
+The default path structure provides access to resources at several levels:
+
+* `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table).
+* `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records.
+* `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records.
+* `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix.
+* `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments.
+
+### GET
+
+These can be used to retrieve individual records or perform searches. This is handled by the Resource method `get()` (and can be overridden).
+
+#### `GET /my-resource/record-id`
+
+This can be used to retrieve a record by its primary key. The response will include the record as the body.
+
+**Caching/Conditional Requests**
+
+A `GET` response for a record will include an encoded version (a timestamp of the last modification) of this record in the `ETag` response header (or of any accessed record when used in a custom get method). On subsequent requests, a client (that has a cached copy) may include an `If-None-Match` request header with this tag. If the record has not been updated since that version, the response will have a 304 status and no body. This facilitates significant performance gains since the response data doesn't need to be serialized and transferred over the network.
+
+#### `GET /my-resource/?property=value`
+
+This can be used to search for records by the specified property name and value. See the querying section for more information.
+
+#### `GET /my-resource/record-id.property`
+
+This can be used to retrieve the specified property of the specified record.
+
+### PUT
+
+This can be used to create or update a record with the provided object/data (similar to an "upsert") with a specified key. This is handled by the Resource method `put(record)`.
+
+#### `PUT /my-resource/record-id`
+
+This will create or update the record with the URL path that maps to the record's primary key. The record will be replaced with the contents of the data in the request body. The new record will exactly match the data that was sent (this will remove any properties that were present in the previous record and not included in the body). Future GETs will return the exact data that was provided by PUT (what you PUT is what you GET). For example:
+
+```http
+PUT /MyTable/123
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create or replace the record with a primary key of "123" with the object defined by the JSON in the body. This is handled by the Resource method `put()`.
+
+### DELETE
+
+This can be used to delete a record or records.
+
+#### `DELETE /my-resource/record-id`
+
+This will delete a record with the given primary key. This is handled by the Resource's `delete` method. For example:
+
+```http
+DELETE /MyTable/123
+```
+
+This will delete the record with the primary key of "123".
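+
+As a minimal client-side sketch (the host and port are illustrative and assume the `MyTable` endpoint above is exported), the same delete could be issued with `fetch`:
+
+```javascript
+// Delete the record with primary key "123" over the REST interface
+const response = await fetch('http://localhost:9926/MyTable/123', {
+  method: 'DELETE',
+});
+console.log(response.ok); // true if the delete succeeded
+```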
+
+#### `DELETE /my-resource/?property=value`
+
+This will delete all the records that match the provided query.
+
+### POST
+
+Generally the POST method can be used for custom actions since POST has the broadest semantics. For tables that are exposed as endpoints, this also can be used to create new records.
+
+#### `POST /my-resource/`
+
+This is handled by the Resource method `post(data)`, which is a good method to extend to make various other types of modifications. Also, with a table you can create a new record without specifying a primary key, for example:
+
+```http
+POST /MyTable/
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create a new record, auto-assigning a primary key, which will be returned in the `Location` header.
+
+### Querying through URL query parameters
+
+URL query parameters provide a powerful language for specifying database queries in Harper. This can be used to search by a single attribute name and value, to find all records with the given value for that property/attribute. It is important to note that this attribute must be configured to be indexed to search on it. For example:
+
+```http
+GET /my-resource/?property=value
+```
+
+We can specify multiple properties that must match:
+
+```http
+GET /my-resource/?property=value&property2=another-value
+```
+
+Note that only one of the attributes needs to be indexed for this query to execute.
+
+We can also specify different comparators such as less than and greater than queries using [FIQL](https://datatracker.ietf.org/doc/html/draft-nottingham-atompub-fiql-00) syntax. If we want to specify records with an `age` value greater than 20:
+
+```http
+GET /my-resource/?age=gt=20
+```
+
+Or less than or equal to 20:
+
+```http
+GET /my-resource/?age=le=20
+```
+
+The comparison operators include the standard FIQL operators `lt` (less than), `le` (less than or equal), `gt` (greater than), `ge` (greater than or equal), and `ne` (not equal). These comparison operators can also be combined with other query parameters with `&`. For example, if we wanted products with a category of software and price between 100 and 200, we could write:
+
+```http
+GET /Product/?category=software&price=gt=100&price=lt=200
+```
+
+Comparison operators can also be used on Date fields; however, we have to ensure that the date format is properly escaped. For example, if we are looking for a listing date greater than `2017-03-08T09:30:00.000Z`, we must escape the colons as `%3A`:
+
+```http
+GET /Product/?listDate=gt=2017-03-08T09%3A30%3A00.000Z
+```
+
+You can also search for attributes that start with a specific string, by using the `==` comparator and appending a `*` to the attribute value:
+
+```http
+GET /Product/?name==Keyboard*
+```
+
+**Chained Conditions**
+
+You can also specify that a range condition must be met for a single attribute value by chaining conditions. This is done by omitting the name in the name-value pair. For example, to find products with a price between 100 and 200, you could write:
+
+```http
+GET /Product/?price=gt=100&=le=200
+```
+
+Chaining can be used to combine `gt` or `ge` with `lt` or `le` to specify a range of values. Currently, no other types of chaining are supported.
+
+Note that some HTTP clients may be overly aggressive in encoding query parameters, and you may need to disable extra encoding of query parameters, to ensure operators are passed through without manipulation.
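+
+As a sketch of building such queries safely from code (the `Product` table and `listDate` attribute are from the example above), `encodeURIComponent` handles the escaping:
+
+```javascript
+// Build a FIQL range query for a date attribute; colons are escaped as %3A
+const since = new Date('2017-03-08T09:30:00.000Z').toISOString();
+const url = `/Product/?listDate=gt=${encodeURIComponent(since)}`;
+// -> "/Product/?listDate=gt=2017-03-08T09%3A30%3A00.000Z"
+```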
+
+Here is a full list of the supported FIQL-style operators/comparators:
+
+* `==`: equal
+* `=lt=`: less than
+* `=le=`: less than or equal
+* `=gt=`: greater than
+* `=ge=`: greater than or equal
+* `=ne=`, `!=`: not equal
+* `=ct=`: contains the value (for strings)
+* `=sw=`, `==*`: starts with the value (for strings)
+* `=ew=`: ends with the value (for strings)
+* `=`, `===`: strict equality (no type conversion)
+* `!==`: strict inequality (no type conversion)
+
+#### Unions
+
+Conditions can also be applied with `OR` logic, returning the union of records that match either condition. This can be specified by using the `|` operator instead of `&`. For example, to return any product with a rating of `5` _or_ a `featured` attribute that is `true`, we could write:
+
+```http
+GET /Product/?rating=5|featured=true
+```
+
+#### Grouping of Operators
+
+Multiple conditions with different operators can be combined with grouping of conditions to indicate the order of operation. Grouping conditions can be done with parentheses, following standard grouping conventions as used in query and mathematical expressions. For example, a query to find products with a rating of 5 OR a price between 100 and 200 could be written:
+
+```http
+GET /Product/?rating=5|(price=gt=100&price=lt=200)
+```
+
+Grouping conditions can also be done with square brackets, which function the same as parentheses for grouping conditions. The advantage of using square brackets is that you can include user-provided values that might have parentheses in them, and use standard URI component encoding functionality, which will safely escape/encode square brackets, but not parentheses. For example, if we were constructing a query for products with a rating of 5 and matching one of a set of user-provided tags, a query could be built like:
+
+```http
+GET /Product/?rating=5&[tag=fast|tag=scalable|tag=efficient]
+```
+
+And the tags could be safely generated from user inputs in a tag array like:
+
+```javascript
+let url = `/Product/?rating=5&[${tags.map(encodeURIComponent).join('|')}]`;
+```
+
+More complex queries can be created by further nesting groups:
+
+```http
+GET /Product/?price=lt=100|[rating=5&[tag=fast|tag=scalable|tag=efficient]&inStock=true]
+```
+
+### Query Calls
+
+Harper has several special query functions that use "call" syntax. These can be included in the query string as their own query entry (separated from other query conditions with an `&`). These include:
+
+#### `select(properties)`
+
+This function allows you to specify which properties should be included in the responses. This takes several forms:
+
+* `?select(property)`: This will return the values of the specified property directly in the response (they will not be put in an object).
+* `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties.
+* `?select([property1,property2,...])`: This returns the records as arrays of the property values in the specified properties.
+* `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property.
+* `?select(property{subProperty1,subProperty2{subSubProperty,..}},...)`: This can be used to specify which sub-properties should be included in nested objects and joined/referenced records.
+
+To get a list of product names with a category of software:
+
+```http
+GET /Product/?category=software&select(name)
+```
+
+#### `limit(start,end)` or `limit(end)`
+
+This function specifies a limit on the number of records returned, optionally providing a starting offset.
+
+For example, to find the first twenty records with a `rating` greater than 3, `inStock` equal to true, only returning the `rating` and `name` properties, you could use:
+
+```http
+GET /Product/?rating=gt=3&inStock=true&select(rating,name)&limit(20)
+```
+
+#### `sort(property)`, `sort(+property,-property,...)`
+
+This function allows you to indicate the sort order for the returned results. The argument for `sort()` is one or more properties that should be used to sort. If the property is prefixed with '+' or no prefix, the sort will be performed in ascending order by the indicated attribute/property. If the property is prefixed with '-', it will be sorted in descending order. If multiple properties are specified, the sort will be performed on the first property, and for records with the same value for that property, the next property will be used to break the tie and sort results. This tie breaking will continue through any provided properties.
+
+For example, to sort by product name (in ascending order):
+
+```http
+GET /Product?rating=gt=3&sort(+name)
+```
+
+To sort by rating in ascending order, then by price in descending order for products with the same rating:
+
+```http
+GET /Product?sort(+rating,-price)
+```
+
+## Relationships
+
+Harper supports relationships in its data models, allowing tables to define a relationship with data from other tables (or even themselves) through foreign keys. These relationships can be one-to-many, many-to-one, or many-to-many (and can even be ordered). These relationships are defined in the schema, and can then easily be queried through chained attributes that act as "join" queries, allowing related attributes to be referenced in conditions and selected for returned results.
+
+### Chained Attributes and Joins
+
+To support relationships and hierarchical data structures, in addition to querying on top-level attributes, you can also query on chained attributes. Most importantly, this provides Harper's "join" functionality, allowing related tables to be queried and joined in the results. Chained properties are specified by using dot syntax. In order to effectively leverage join functionality, you need to define a relationship in your schema:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  name: String
+  brandId: ID @indexed
+  brand: Brand @relationship(from: "brandId")
+}
+type Brand @table @export {
+  id: ID @primaryKey
+  name: String
+  products: [Product] @relationship(to: "brandId")
+}
+```
+
+And then you could query a product by brand name:
+
+```http
+GET /Product/?brand.name=Microsoft
+```
+
+This will query for products for which the `brandId` references a `Brand` record with a `name` of `"Microsoft"`.
+
+The `brand` attribute in `Product` is a "computed" attribute from the foreign key (`brandId`), for the many-to-one relationship to the `Brand`. In the schema above, we also defined the reverse one-to-many relationship from a `Brand` to a `Product`, and we could likewise query that:
+
+```http
+GET /Brand/?products.name=Keyboard
+```
+
+This would return any `Brand` with at least one product with a name of `"Keyboard"`. Note that both of these queries are effectively acting as an "INNER JOIN".
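+
+To make that concrete, with hypothetical data the first query would return only the matching products (their top-level attributes; computed relationship attributes are not included by default, as described below):
+
+```json
+[
+  { "id": "p1", "name": "Keyboard", "brandId": "b1" },
+  { "id": "p2", "name": "Mouse", "brandId": "b1" }
+]
+```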
+
+#### Chained/Nested Select
+
+Computed relationship attributes are not included by default in query results. However, we can include them by specifying them in a select:
+
+```http
+GET /Product/?brand.name=Microsoft&select(name,brand)
+```
+
+We can also do a "nested" select and specify which sub-attributes to include. For example, if we only wanted to include the name property from the brand, we could do so:
+
+```http
+GET /Product/?brand.name=Microsoft&select(name,brand{name})
+```
+
+Or to specify multiple sub-attributes, we can comma delimit them. Note that selects can "join" to another table without any constraint/filter on the related/joined table:
+
+```http
+GET /Product/?name=Keyboard&select(name,brand{name,id})
+```
+
+When selecting properties from a related table without any constraints on the related table, this effectively acts like a "LEFT JOIN" and will omit the `brand` property if the `brandId` is `null` or references a non-existent brand.
+
+#### Many-to-many Relationships (Array of Foreign Keys)
+
+Many-to-many relationships are also supported, and can easily be created using an array of foreign key values, without requiring the traditional use of a junction table. This can be done by simply creating a relationship on an array-typed property that references a local array of foreign keys. For example, we could create a relationship to the resellers of a product (each product can have multiple resellers, and each reseller can carry multiple products):
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  name: String
+  resellerIds: [ID] @indexed
+  resellers: [Reseller] @relationship(from: "resellerIds")
+}
+type Reseller @table {
+  id: ID @primaryKey
+  name: String
+  ...
+}
+```
+
+The product record can then hold an array of the reseller ids. When the `resellers` property is accessed (either through code or through selects and conditions), the array of ids is resolved to an array of reseller records. We can also query through the `resellers` relationship just like the other relationships. For example, to query the products that are available through the "Cool Shop":
+
+```http
+GET /Product/?resellers.name=Cool Shop&select(id,name,resellers{name,id})
+```
+
+One of the benefits of using an array of foreign key values is that it can be manipulated using standard array methods (in JavaScript), and the array can dictate an order for the keys and therefore for the resulting records. For example, you may wish to define a specific order for the resellers and how they are listed (which comes first, which last):
+
+```http
+PUT /Product/123
+Content-Type: application/json
+
+{ "id": "123", "resellerIds": ["first-reseller-id", "second-reseller-id", "last-reseller-id"],
+...}
+```
+
+#### Type Conversion
+
+Query parameters are simply text, so there are several features for converting parameter values to properly typed values for performing correct searches. For the FIQL comparators, which include `==`, `!=`, `=gt=`, `=lt=`, `=ge=`, and `=le=`, the parser will perform type conversion, according to the following rules:
+
+* `name==null`: Will convert the value to `null` for searching.
+* `name==123`: Will convert the value to a number _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`).
+* `name==true`: Will convert the value to a boolean _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`).
+* `name==number:123`: Will explicitly convert the value after "number:" to a number.
+* `name==boolean:true`: Will explicitly convert the value after "boolean:" to a boolean.
+* `name==string:some%20text`: Will explicitly keep the value after "string:" as a string (and perform URL component decoding).
+* `name==date:2024-01-05T20%3A07%3A27.955Z`: Will explicitly convert the value after "date:" to a Date object.
+
+If the attribute specifies a type (like `Float`) in the schema definition, the value will always be converted to the specified type before searching.
+
+For "strict" operators, which include `=`, `===`, and `!==`, no automatic type conversion will be applied; the value will be decoded as a string (with URL component decoding), unless the attribute specifies a type, in which case the value will be converted to that type.
+
+#### Content Types and Negotiation
+
+HTTP defines a couple of headers for indicating the (preferred) content type of the request and response. The `Content-Type` request header can be used to specify the content type of the request body (for PUT, PATCH, and POST). The `Accept` request header indicates the preferred content type of the response. For general records with object structures, Harper supports the following content types:
+
+* `application/json` - Common format, easy to read, with great tooling support.
+* `application/cbor` - Recommended binary format for optimal encoding efficiency and performance.
+* `application/x-msgpack` - This is also an efficient format, but CBOR is preferable, as it has better streaming capabilities and faster time-to-first-byte.
+* `text/csv` - CSV lacks explicit typing and is not well suited for heterogeneous data structures, but is good for moving data to and from a spreadsheet.
+
+CBOR is generally the most efficient and powerful encoding format, with the best performance, most compact encoding, and most expansive ability to encode different data types like Dates, Maps, and Sets. MessagePack is very similar and tends to have broader adoption. However, JSON can be easier to work with and may have better tooling. Also, if you are using compression for data transfer (gzip or brotli), JSON will often result in more compact compressed data due to character frequencies that better align with Huffman coding, making JSON a good choice for web applications that do not require specific data types beyond the standard JSON types.
+
+Requesting a specific content type can also be done in a URL by suffixing the path with the extension for the content type. If you want to retrieve a record in CSV format, you could request:
+
+```http
+GET /product/some-id.csv
+```
+
+Or you could request a query response in MessagePack:
+
+```http
+GET /product/.msgpack?category=software
+```
+
+However, it is generally not recommended that you use extensions in paths; it is best practice to use the `Accept` header to specify acceptable content types.
+
+#### Specific Content Objects
+
+You can specify other content types, and the data will be stored as a record or object that holds the type and contents of the data. For example, if you do:
+
+```http
+PUT /my-resource/33
+Content-Type: text/calendar
+
+BEGIN:VCALENDAR
+VERSION:2.0
+...
+```
+
+This would store a record equivalent to the JSON:
+
+```json
+{ "contentType": "text/calendar", "data": "BEGIN:VCALENDAR\nVERSION:2.0\n..." }
+```
+
+Retrieving a record with `contentType` and `data` properties will likewise return a response with the specified `Content-Type` and body. If the `Content-Type` is not of the `text` family, the data will be treated as binary data (a Node.js `Buffer`).
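+
+As a brief sketch (the URL and id are illustrative), fetching such a record from a client looks like any other request; the stored content type comes back on the response:
+
+```javascript
+// The response body is the stored "data" and the Content-Type header
+// reflects the stored "contentType" property
+const response = await fetch('http://localhost:9926/my-resource/33');
+console.log(response.headers.get('content-type')); // e.g. "text/calendar"
+const body = await response.text();
+```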
+
+You can also use `application/octet-stream` to indicate that the request body should be preserved in binary form. This is also useful for uploading to a specific property:
+
+```http
+PUT /my-resource/33/image
+Content-Type: image/gif
+
+...image data...
+```
diff --git a/site/versioned_docs/version-4.4/developers/security/basic-auth.md b/site/versioned_docs/version-4.4/developers/security/basic-auth.md
new file mode 100644
index 00000000..6736f2c8
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/basic-auth.md
@@ -0,0 +1,62 @@
+---
+title: Basic Authentication
+---
+
+# Basic Authentication
+
+Harper uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request.
+
+**You do not need to log in separately. Basic Auth is added to each HTTP request (like `create_database`, `create_table`, `insert`, etc.) via headers.**
+
+A header is added to each HTTP request. The header key is **"Authorization"** and the header value is **"Basic <<your username and password buffer token>>"**.
+
+## Authentication in Harper Studio
+
+In the below code sample, you can see where we add the authorization header to the request. This needs to be added for each and every HTTP request for Harper.
+
+_Note: This function uses btoa. Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._
+
+```javascript
+const http = require("http");
+
+// Helper assumed by the sample below: checks whether a buffer parses as JSON.
+function isJson(body) {
+    try {
+        JSON.parse(body);
+        return true;
+    } catch (e) {
+        return false;
+    }
+}
+
+function callHarperDB(call_object, operation, callback) {
+    const options = {
+        "method": "POST",
+        "hostname": call_object.endpoint_url,
+        "port": call_object.endpoint_port,
+        "path": "/",
+        "headers": {
+            "content-type": "application/json",
+            // Basic auth: base64-encode "username:password"
+            "authorization": "Basic " + btoa(call_object.username + ':' + call_object.password),
+            "cache-control": "no-cache"
+        }
+    };
+
+    const http_req = http.request(options, function (hdb_res) {
+        let chunks = [];
+
+        hdb_res.on("data", function (chunk) {
+            chunks.push(chunk);
+        });
+
+        hdb_res.on("end", function () {
+            const body = Buffer.concat(chunks);
+            if (isJson(body)) {
+                return callback(null, JSON.parse(body));
+            } else {
+                return callback(body, null);
+            }
+        });
+    });
+
+    http_req.on("error", function (chunk) {
+        return callback("Failed to connect", null);
+    });
+
+    http_req.write(JSON.stringify(operation));
+    http_req.end();
+}
+```
diff --git a/site/versioned_docs/version-4.4/developers/security/certificate-management.md b/site/versioned_docs/version-4.4/developers/security/certificate-management.md
new file mode 100644
index 00000000..fdc8cc22
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/certificate-management.md
@@ -0,0 +1,74 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+This document covers managing certificates for Harper's external-facing APIs. For information on certificate management for clustering, see [clustering certificate management](../clustering/certificate-management).
+
+## Development
+
+An out-of-the-box install of Harper does not have HTTPS enabled (see [configuration](../../deployments/configuration#http) for the relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS.
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper.
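+
+A minimal sketch of that configuration (the port value is illustrative):
+
+```yaml
+http:
+  securePort: 9927 # port to serve HTTPS connections on
+```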
+
+By default, Harper will generate certificates and place them at `/keys/`. These certificates will not have a valid Common Name (CN) for your Harper node, so you will be able to use HTTPS, but your HTTPS client must be configured to accept the invalid certificate.
+
+## Production
+
+For production deployments, in addition to using HTTPS, we recommend using your own certificate authority (CA) or a public CA such as Let's Encrypt, to generate certificates with CNs that match the Fully Qualified Domain Name (FQDN) of your Harper node.
+
+We have a few recommended options for enabling HTTPS in a production setting.
+
+### Option: Enable Harper HTTPS and Replace Certificates
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper.
+
+To replace the certificates, either replace the contents of the existing certificate files at `/keys/`, or update the Harper configuration with the path of your new certificate files, and then restart Harper.
+
+```yaml
+tls:
+  certificate: ~/hdb/keys/certificate.pem
+  privateKey: ~/hdb/keys/privateKey.pem
+```
+
+The `operationsApi.tls` configuration is optional. If it is not set, Harper will default to the values in the `tls` section.
+
+```yaml
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+```
+
+### mTLS
+
+Mutual TLS (mTLS) is a security protocol that requires both the client and the server to present certificates to each other. Requiring a client certificate can be useful for authenticating clients and ensuring that only authorized clients can access your Harper instance. This can be enabled by setting the `http.mtls` configuration in `harperdb-config.yaml` to `true` and providing a certificate authority in the `tls` section:
+
+```yaml
+http:
+  mtls: true
+  ...
+tls:
+  certificateAuthority: ~/hdb/keys/ca.pem
+  ...
+```
+
+### Option: Nginx Reverse Proxy
+
+Instead of enabling HTTPS for Harper, Nginx can be used as a reverse proxy for Harper.
+
+Install Nginx, configure Nginx to use certificates issued from your own CA or a public CA, then configure Nginx to listen for HTTPS requests and forward them to Harper as HTTP requests.
+
+[Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let’s Encrypt certificates used by Nginx.
+
+### Option: External Reverse Proxy
+
+Instead of enabling HTTPS for Harper, a number of different external services can be used as a reverse proxy for Harper. These services typically have integrated certificate management. Configure the service to listen for HTTPS requests and forward them (over a private network) to Harper as HTTP requests.
+
+Examples of these types of services include an AWS Application Load Balancer or a GCP external HTTP(S) load balancer.
+
+### Additional Considerations
+
+It is possible to use different certificates for the Operations API and the Custom Functions API. In scenarios where only your Custom Functions endpoints need to be exposed to the Internet and the Operations API is reserved for Harper administration, you may want to use a private CA to issue certificates for the Operations API and a public CA for the Custom Functions API certificates.
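+
+A sketch of such a split configuration (the paths are illustrative):
+
+```yaml
+# Publicly-issued certificate for the application/Custom Functions endpoints
+tls:
+  certificate: ~/hdb/keys/public-certificate.pem
+  privateKey: ~/hdb/keys/public-privateKey.pem
+# Private-CA-issued certificate for the operations API
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/ops-certificate.pem
+    certificateAuthority: ~/hdb/keys/private-ca.pem
+    privateKey: ~/hdb/keys/ops-privateKey.pem
+```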
diff --git a/site/versioned_docs/version-4.4/developers/security/configuration.md b/site/versioned_docs/version-4.4/developers/security/configuration.md
new file mode 100644
index 00000000..f21eb9b2
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/configuration.md
@@ -0,0 +1,39 @@
+---
+title: Configuration
+---
+
+# Configuration
+
+Harper was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with Harper.
+
+## CORS
+
+Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, Harper enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following:
+
+1. Open the `harperdb-config.yaml` file, which can be found in the location you specified during install.
+1. In `harperdb-config.yaml` there should be 2 entries under `operationsApi.network`: `cors` and `corsAccessList`.
+   * `cors`
+     1. To turn off, change to: `cors: false`
+     1. To turn on, change to: `cors: true`
+   * `corsAccessList`
+     1. The `corsAccessList` will only be recognized by the system when `cors` is `true`
+     1. To create an access list, set `corsAccessList` to a comma-separated list of domains.
+
+        i.e. `corsAccessList` is `http://harpersystems.dev,http://products.harpersystems.dev`
+     1. To clear out the access list and allow all domains: `corsAccessList` is `[null]`
+
+## SSL
+
+Harper provides the option to use an HTTP or HTTPS (with HTTP/2) interface. The default port for the server is 9925.
+
+This default port can be changed by updating the `operationsApi.network.port` value in `/harperdb-config.yaml`.
+
+By default, HTTPS is turned off and HTTP is turned on. It is recommended that you never directly expose Harper's HTTP interface through a publicly available port. HTTP is intended for local or private network use.
+
+You can toggle between HTTPS and HTTP in the settings file by setting `operationsApi.network.https` to `true`/`false`. When `https` is set to `false`, the server will use HTTP (version 1.1). Enabling HTTPS will enable both HTTP/1.1 and HTTP/2 over TLS.
+
+Harper automatically generates a certificate (certificate.pem), a certificate authority (ca.pem), and a private key file (privateKey.pem), which live at `/keys/`.
+
+You can replace these with your own certificates and key.
+
+**Changes to these settings require a restart. Use the `restart` operation from the Harper Operations API.**
diff --git a/site/versioned_docs/version-4.4/developers/security/index.md b/site/versioned_docs/version-4.4/developers/security/index.md
new file mode 100644
index 00000000..55897945
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/index.md
@@ -0,0 +1,13 @@
+---
+title: Security
+---
+
+# Security
+
+Harper uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases.
+
+* [JWT Authentication](./jwt-auth)
+* [Basic Authentication](./basic-auth)
+* [mTLS Authentication](./mtls-auth)
+* [Configuration](./configuration)
+* [Users and Roles](./users-and-roles)
diff --git a/site/versioned_docs/version-4.4/developers/security/jwt-auth.md b/site/versioned_docs/version-4.4/developers/security/jwt-auth.md
new file mode 100644
index 00000000..a62d2841
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/jwt-auth.md
@@ -0,0 +1,96 @@
+---
+title: JWT Authentication
+---
+
+# JWT Authentication
+
+Harper uses token-based authentication with JSON Web Tokens (JWTs).
+
+This consists of two primary operations, `create_authentication_tokens` and `refresh_operation_token`, which generate two types of tokens, as follows:
+
+* The `operation_token`, which is used to authenticate all Harper operations in the Bearer Token Authorization header. The default expiry is one day.
+* The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization header for the `refresh_operation_token` operation only. The default expiry is thirty days.
+
+The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost.
+
+## Create Authentication Tokens
+
+Users must initially create tokens using their Harper credentials. The following POST body is sent to Harper. No headers are required for this POST operation.
+
+```json
+{
+  "operation": "create_authentication_tokens",
+  "username": "username",
+  "password": "password"
+}
+```
+
+A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+  "operation": "create_authentication_tokens",
+  "username": "username",
+  "password": "password"
+}'
+```
+
+An example expected return object is:
+
+```json
+{
+  "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4",
+  "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60"
+}
+```
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60" +} +``` + +## Using JWT Authentication Tokens + +The `operation_token` value is used to authenticate all operations in place of our standard Basic auth. In order to pass the token you will need to create an Bearer Token Authorization Header like the following request: + +```bash +curl --location --request POST 'http:/localhost:9925' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \ +--data-raw '{ + "operation":"search_by_hash", + "schema":"dev", + "table":"dog", + "hash_values":[1], + "get_attributes": ["*"] +}' +``` + +## Token Expiration + +`operation_token` expires at a set interval. Once it expires it will no longer be accepted by Harper. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization Header. 
+
+A full cURL example of `refresh_operation_token` can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \
+--data-raw '{
+    "operation":"refresh_operation_token"
+}'
+```
+
+This will return a new `operation_token`. An example expected return object is:
+
+```json
+{
+    "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ5NzgxODkxNTEsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDk3ODE4OTE1MSwiYWN0aXZlIjp0cnVlLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDk0NDE1MTM0NywiX191cGRhdGVkdGltZV9fIjoxNjA0OTQ0MTUxMzQ3LCJpZCI6IjdiNDNlNzM1LTkzYzctNDQzYi05NGY3LWQwMzY3Njg5NDc4YSIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6InVzZXJuYW1lIn0sImlhdCI6MTYwNDk3ODcxMywiZXhwIjoxNjA1MDY1MTEzLCJzdWIiOiJvcGVyYXRpb24ifQ.qB4FS7fzryCO5epQlFCQe4mQcUEhzXjfsXRFPgauXrGZwSeSr2o2a1tE1xjiI3qjK0r3f2bdi2xpFlDR1thdY-m0mOpHTICNOae4KdKzp7cyzRaOFurQnVYmkWjuV_Ww4PJgr6P3XDgXs5_B2d7ZVBR-BaAimYhVRIIShfpWk-4iN1XDk96TwloCkYx01BuN87o-VOvAnOG-K_EISA9RuEBpSkfUEuvHx8IU4VgfywdbhNMh6WXM0VP7ZzSpshgsS07MGjysGtZHNTVExEvFh14lyfjfqKjDoIJbo2msQwD2FvrTTb0iaQry1-Wwz9QJjVAUtid7tJuP8aBeNqvKyMIXRVnl5viFUr-Gs-Zl_WtyVvKlYWw0_rUn3ucmurK8tTy6iHyJ6XdUf4pYQebpEkIvi2rd__e_Z60V84MPvIYs6F_8CAy78aaYmUg5pihUEehIvGRj1RUZgdfaXElw90-m-M5hMOTI04LrzzVnBu7DcMYg4UC1W-WDrrj4zUq7y8_LczDA-yBC2-bkvWwLVtHLgV5yIEuIx2zAN74RQ4eCy1ffWDrVxYJBau4yiIyCc68dsatwHHH6bMK0uI9ib6Y9lsxCYjh-7MFcbP-4UBhgoDDXN9xoUToDLRqR9FTHqAHrGHp7BCdF5d6TQTVL5fmmg61MrLucOo-LZBXs1NY"
+}
+```
+
+The `refresh_token` also expires at a set interval, though a longer one. Once it expires it will no longer be accepted by Harper. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate both a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called.
+
+## Configuration
+
+Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration) with the following parameters:
+
+* `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation\_token expires (default 1d).
+* `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh\_token expires (default 30d).
+
+A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
diff --git a/site/versioned_docs/version-4.4/developers/security/mtls-auth.md b/site/versioned_docs/version-4.4/developers/security/mtls-auth.md
new file mode 100644
index 00000000..0d4538aa
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/mtls-auth.md
@@ -0,0 +1,7 @@
+---
+title: mTLS Authentication
+---
+
+# mTLS Authentication
+
+Harper supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration#http), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. The [HTTP config settings](../../deployments/configuration#http) allow you to specify whether mTLS is required for all connections or optional.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/developers/security/users-and-roles.md b/site/versioned_docs/version-4.4/developers/security/users-and-roles.md
new file mode 100644
index 00000000..b1b5ffc3
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/security/users-and-roles.md
@@ -0,0 +1,267 @@
+---
+title: Users & Roles
+---
+
+# Users & Roles
+
+Harper utilizes a Role-Based Access Control (RBAC) framework to manage access to Harper instances. A user is assigned a role that determines the user’s permissions to access database resources and run core operations.
+
+## Roles in Harper
+
+Role permissions in Harper are broken into two categories – permissions around database manipulation and permissions around database definition.
+
+**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a Harper instance.
+
+1. At the table level, access permissions must be explicitly defined when adding or altering a role – _i.e. Harper will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations (see the sketch after this list)._
+1. At the attribute level, permissions for attributes in all tables included in the permissions set will be assigned based either on the specific attribute-level permissions defined in the table’s permission set or, if no attribute-level permissions are defined, on the table’s CRUD set.
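+
+To make those defaults concrete, here is a small, hypothetical permissions fragment (a sketch only – the `dev` database and `dog` table are stand-in names, not from the docs): the role gets read-only access to `dev.dog`, every CRUD flag left `false` stays denied, and the empty `attribute_permissions` array means each attribute simply inherits the table-level CRUD settings.
+
+```json
+{
+    "super_user": false,
+    "dev": {
+        "tables": {
+            "dog": {
+                "read": true,
+                "insert": false,
+                "update": false,
+                "delete": false,
+                "attribute_permissions": []
+            }
+        }
+    }
+}
+```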
+
+**Database Definition**: Permissions related to managing databases, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role.
+
+**Built-In Roles**
+
+There are three built-in roles within Harper. See the full breakdown of operations restricted to super\_user roles [here](./users-and-roles#role-based-operation-restrictions).
+
+* `super_user` - This role provides full access to all operations and methods within a Harper instance; it can be considered the admin role.
+  * This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
+* `cluster_user` - This role is an internal system role type that allows clustered instances to communicate with one another.
+  * This role is managed internally to facilitate communication between clustered instances.
+* `structure_user` - This role provides specific access for the creation and deletion of databases and tables.
+  * When defining this role type you can either assign a value of `true`, which allows the role to create and drop databases & tables, or assign a string array of database names, which allows the role to create and drop tables only in the designated databases.
+
+**User-Defined Roles**
+
+In addition to built-in roles, admins (i.e. users assigned to the super\_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.
+
+* Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
+* Describe operations will return metadata for all databases, tables, and attributes that a user-defined role has CRUD permissions for.
+
+**Role Permissions**
+
+When creating a new, user-defined role in a Harper instance, you must provide a role name and the permissions to assign to that role. _Reminder, only super users can create and manage roles._
+
+* `role` – the name used to easily identify the role assigned to individual users.
+
+  _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation._
+* `permissions` – used to explicitly define CRUD access to existing table data.
+
+Example JSON for an `add_role` request:
+
+```json
+{
+    "operation":"add_role",
+    "role":"software_developer",
+    "permission":{
+        "super_user":false,
+        "database_name":{
+            "tables": {
+                "table_name1": {
+                    "read":true,
+                    "insert":true,
+                    "update":true,
+                    "delete":false,
+                    "attribute_permissions":[
+                        {
+                            "attribute_name":"attribute1",
+                            "read":true,
+                            "insert":true,
+                            "update":true
+                        }
+                    ]
+                },
+                "table_name2": {
+                    "read":true,
+                    "insert":true,
+                    "update":true,
+                    "delete":false,
+                    "attribute_permissions":[]
+                }
+            }
+        }
+    }
+}
+```
+
+**Setting Role Permissions**
+
+There are two parts to a permissions set:
+
+* `super_user` – boolean value indicating if the role should be provided super\_user access.
+
+  _If `super_user` is set to true, there should be no additional database-specific permissions values included, since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within Harper but ignored, as super\_users have full access to the database._
+* `permissions` – database tables that a role should have specific CRUD access to should be included in the final, database-specific `permissions` JSON.
+
+  _For user-defined roles (i.e. non-super\_user roles), blank permissions will result in the user being restricted from accessing any of the database schema._
+
+**Table Permissions JSON**
+
+Each table that a role should be given some level of CRUD permissions to must be included in the `tables` array for its database in the role's permissions JSON passed to the API (_see example above_).
+
+```json
+{
+    "table_name": { // the name of the table to define CRUD perms for
+        "read": boolean, // access to read from this table
+        "insert": boolean, // access to insert data to table
+        "update": boolean, // access to update data in table
+        "delete": boolean, // access to delete row data in table
+        "attribute_permissions": [ // permissions for specific table attributes
+            {
+                "attribute_name": "attribute_name", // attribute to assign permissions to
+                "read": boolean, // access to read this attribute from table
+                "insert": boolean, // access to insert this attribute into the table
+                "update": boolean // access to update this attribute in the table
+            }
+        ]
+    }
+}
+```
+
+**Important Notes About Table Permissions**
+
+1. If a database and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to that database and/or those tables.
+1. If a table-level CRUD permission is set to false, any attribute-level permission of the same CRUD type set to true will return an error.
+
+**Important Notes About Attribute Permissions**
+
+1. If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array.
+1. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.
+
+   _See table\_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute\_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._
+1. If attribute-level permissions are set – _i.e. attribute\_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).
+
+   _See table\_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert and update ‘attribute1’ and the table’s hash attribute, but no other attributes on that table._
+1. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.
+
+   _See table\_name2’s permission set for an example of this._
+1. The `__createdtime__` and `__updatedtime__` attributes that Harper manages internally can have read permissions set; if set, all other attribute-level permissions on them will be ignored.
+1. Please note that DELETE permissions are not included as part of an individual attribute-level permission set. That is because it is not possible to delete individual attributes from a row; rows must be deleted in full.
+   * If a role needs the ability to delete rows from a table, that permission should be set at the table level.
+   * The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement.
+
+## Role-Based Operation Restrictions
+
+The table below includes all API operations available in Harper and indicates whether or not the operation is restricted to super\_user roles.
+
+_Keep in mind that non-super\_user roles will also be restricted within the operations they do have access to by the database-level CRUD permissions set for the roles._
+
+| Databases and Tables | Restricted to Super\_Users |
+| -------------------- | :------------------------: |
+| describe\_all | |
+| describe\_database | |
+| describe\_table | |
+| create\_database | X |
+| drop\_database | X |
+| create\_table | X |
+| drop\_table | X |
+| create\_attribute | |
+| drop\_attribute | X |
+
+| NoSQL Operations | Restricted to Super\_Users |
+| ---------------------- | :------------------------: |
+| insert | |
+| update | |
+| upsert | |
+| delete | |
+| search\_by\_hash | |
+| search\_by\_value | |
+| search\_by\_conditions | |
+
+| SQL Operations | Restricted to Super\_Users |
+| -------------- | :------------------------: |
+| select | |
+| insert | |
+| update | |
+| delete | |
+
+| Bulk Operations | Restricted to Super\_Users |
+| ---------------- | :------------------------: |
+| csv\_data\_load | |
+| csv\_file\_load | |
+| csv\_url\_load | |
+| import\_from\_s3 | |
+
+| Users and Roles | Restricted to Super\_Users |
+| --------------- | :------------------------: |
+| list\_roles | X |
+| add\_role | X |
+| alter\_role | X |
+| drop\_role | X |
+| list\_users | X |
+| user\_info | |
+| add\_user | X |
+| alter\_user | X |
+| drop\_user | X |
+
+| Clustering | Restricted to Super\_Users |
+| ----------------------- | :------------------------: |
+| cluster\_set\_routes | X |
+| cluster\_get\_routes | X |
+| cluster\_delete\_routes | X |
+| add\_node | X |
+| update\_node | X |
+| cluster\_status | X |
+| remove\_node | X |
+| configure\_cluster | X |
+
+| Components | Restricted to Super\_Users |
+| -------------------- | :------------------------: |
+| get\_components | X |
+| get\_component\_file | X |
+| set\_component\_file | X |
+| drop\_component | X |
+| add\_component | X |
+| package\_component | X |
+| deploy\_component | X |
+
+| Custom Functions | Restricted to Super\_Users |
+| ---------------------------------- | :------------------------: |
+| custom\_functions\_status | X |
+| get\_custom\_functions | X |
+| get\_custom\_function | X |
+| set\_custom\_function | X |
+| drop\_custom\_function | X |
+| add\_custom\_function\_project | X |
+| drop\_custom\_function\_project | X |
+| package\_custom\_function\_project | X |
+| deploy\_custom\_function\_project | X |
+
+| Registration | Restricted to Super\_Users |
+| ------------------ | :------------------------: |
+| registration\_info | |
+| get\_fingerprint | X |
+| set\_license | X |
+
+| Jobs | Restricted to Super\_Users |
+| ----------------------------- | :------------------------: |
+| get\_job | |
+| search\_jobs\_by\_start\_date | X |
+
+| Logs | Restricted to Super\_Users |
+| --------------------------------- | :------------------------: |
+| read\_log | X |
+| read\_transaction\_log | X |
+| delete\_transaction\_logs\_before | X |
+| read\_audit\_log | X |
+| delete\_audit\_logs\_before | X |
+
+| Utilities | Restricted to Super\_Users |
+| ----------------------- | :------------------------: |
+| delete\_records\_before | X |
+| export\_local | X |
+| export\_to\_s3 | X |
+| system\_information | X |
+| restart | X |
+| restart\_service | X |
+| get\_configuration | X |
+| configure\_cluster | X |
+
+| Token Authentication | Restricted to Super\_Users |
+| ------------------------------ | :------------------------: |
+| create\_authentication\_tokens | |
+| refresh\_operation\_token | |
+
+## Error: Must execute as User
+
+**You may have gotten an error like** `Error: Must execute as <>`.
+
+This means that you installed Harper as `<>`. Because Harper stores files natively on the operating system, we only allow the Harper executable to be run by a single user. This prevents permissions issues on files.
+
+For example, if you installed as user\_a but later wanted to run as user\_b, user\_b may not have access to the hdb files Harper needs. This also keeps Harper more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/date-functions.md b/site/versioned_docs/version-4.4/developers/sql-guide/date-functions.md
new file mode 100644
index 00000000..4ce2c203
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/date-functions.md
@@ -0,0 +1,226 @@
+---
+title: SQL Date Functions
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL Date Functions
+
+Harper utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC.
+
+When parsing date values passed to SQL date functions in Harper, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found.
+
+### CURRENT_DATE()
+
+Returns the current date in UTC in `YYYY-MM-DD` String format.
+
+```
+"SELECT CURRENT_DATE() AS current_date_result" returns
+    {
+      "current_date_result": "2020-04-22"
+    }
+```
+
+### CURRENT_TIME()
+
+Returns the current time in UTC in `HH:mm:ss.SSS` String format.
+
+```
+"SELECT CURRENT_TIME() AS current_time_result" returns
+    {
+      "current_time_result": "15:18:14.639"
+    }
+```
+
+### CURRENT_TIMESTAMP
+
+Referencing this variable will evaluate as the current Unix Timestamp in milliseconds.
+
+```
+"SELECT CURRENT_TIMESTAMP AS current_timestamp_result" returns
+    {
+      "current_timestamp_result": 1587568845765
+    }
+```
+
+### DATE([date_string])
+
+Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above.
+
+```
+"SELECT DATE(1587568845765) AS date_result" returns
+    {
+      "date_result": "2020-04-22T15:20:45.765+0000"
+    }
+```
+
+```
+"SELECT DATE(CURRENT_TIMESTAMP) AS date_result2" returns
+    {
+      "date_result2": "2020-04-22T15:20:45.765+0000"
+    }
+```
+
+### DATE_ADD(date, value, interval)
+
+Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+| ------------ | --------- |
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND
+"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return
+    {
+      "date_add_result": 1587655245765
+    }
+```
+
+```
+"SELECT DATE_ADD(CURRENT_TIMESTAMP, 2, 'years')
+AS date_add_result2" returns
+    {
+      "date_add_result2": 1650643129017
+    }
+```
+
+### DATE_DIFF(date_1, date_2[, interval])
+
+Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds.
+
+Accepted interval values:
+* years
+* months
+* weeks
+* days
+* hours
+* minutes
+* seconds
+
+```
+"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours')
+AS date_diff_result" returns
+    {
+      "date_diff_result": -17519.753333333334
+    }
+```
+
+### DATE_FORMAT(date, format)
+
+Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/).
+
+```
+"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss')
+AS date_format_result" returns
+    {
+      "date_format_result": "2018-04-22 15:57:07"
+    }
+```
+
+### DATE_SUB(date, value, interval)
+
+Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+| ------------ | --------- |
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns
+    {
+      "date_sub_result": 1524410445765
+    }
+```
+
+### EXTRACT(date, date_part)
+
+Extracts and returns the date_part requested as a String value. The accepted date_part values below show the value returned for date = “2020-03-26T15:13:02.041+0000”.
+
+| date_part | Example return value |
+| ----------- | -------------------- |
+| year | “2020” |
+| month | “3” |
+| day | “26” |
+| hour | “15” |
+| minute | “13” |
+| second | “2” |
+| millisecond | “41” |
+
+```
+"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns
+    {
+      "extract_result": "2020"
+    }
+```
+
+### GETDATE()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT GETDATE() AS getdate_result" returns
+    {
+      "getdate_result": 1587568845765
+    }
+```
+
+### GET_SERVER_TIME()
+
+Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+```
+"SELECT GET_SERVER_TIME() AS get_server_time_result" returns
+    {
+      "get_server_time_result": "2020-04-22T15:20:45.765+0000"
+    }
+```
+
+### OFFSET_UTC(date, offset)
+
+Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours.
+
+```
+"SELECT OFFSET_UTC(1587568845765, 240) AS offset_utc_result" returns
+    {
+      "offset_utc_result": "2020-04-22T19:20:45.765+0400"
+    }
+```
+
+```
+"SELECT OFFSET_UTC(1587568845765, 10) AS offset_utc_result2" returns
+    {
+      "offset_utc_result2": "2020-04-23T01:20:45.765+1000"
+    }
+```
+
+### NOW()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT NOW() AS now_result" returns
+    {
+      "now_result": 1587568845765
+    }
+```
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/features-matrix.md b/site/versioned_docs/version-4.4/developers/sql-guide/features-matrix.md
new file mode 100644
index 00000000..f4225cf9
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/features-matrix.md
@@ -0,0 +1,88 @@
+---
+title: SQL Features Matrix
+---
+
+# SQL Features Matrix
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+## SQL Features Matrix
+
+Harper provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need.
+
+| INSERT | |
+| ---------------------------------- | - |
+| Values - multiple values supported | ✔ |
+| Sub-SELECT | ✗ |
+
+| UPDATE | |
+| ---------------- | - |
+| SET | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
+
+| DELETE | |
+| ---------- | - |
+| FROM | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+
+| SELECT | |
+| -------------------- | - |
+| Column SELECT | ✔ |
+| Aliases | ✔ |
+| Aggregator Functions | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
+| Constant Values | ✔ |
+| Distinct | ✔ |
+| Sub-SELECT | ✗ |
+
+| FROM | |
+| ---------------- | - |
+| Multi-table JOIN | ✔ |
+| INNER JOIN | ✔ |
+| LEFT OUTER JOIN | ✔ |
+| LEFT INNER JOIN | ✔ |
+| RIGHT OUTER JOIN | ✔ |
+| RIGHT INNER JOIN | ✔ |
+| FULL JOIN | ✔ |
+| UNION | ✗ |
+| Sub-SELECT | ✗ |
+| TOP | ✔ |
+
+| WHERE | |
+| -------------------------- | - |
+| Multi-Conditions | ✔ |
+| Wildcards | ✔ |
+| IN | ✔ |
+| LIKE | ✔ |
+| Bit-wise Operators AND, OR | ✔ |
+| Bit-wise Operators NOT | ✔ |
+| NULL | ✔ |
+| BETWEEN | ✔ |
+| EXISTS,ANY,ALL | ✔ |
+| Compare columns | ✔ |
+| Compare constants | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
+| Sub-SELECT | ✗ |
+
+| GROUP BY | |
+| --------------------- | - |
+| Multi-Column GROUP BY | ✔ |
+
+| HAVING | |
+| ----------------------------- | - |
+| Aggregate function conditions | ✔ |
+
+| ORDER BY | |
+| --------------------- | - |
+| Multi-Column ORDER BY | ✔ |
+| Aliases | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/functions.md b/site/versioned_docs/version-4.4/developers/sql-guide/functions.md
new file mode 100644
index 00000000..eeebd8b4
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/functions.md
@@ -0,0 +1,157 @@
+---
+title: Harper SQL Functions
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# Harper SQL Functions
+
+This SQL keywords reference contains the SQL functions available in Harper.
+
+## Functions
+
+### Aggregate
+
+| Keyword | Syntax | Description |
+| --------------- | -------------------------------------------------------------------- | ----------- |
+| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
+| COUNT | SELECT COUNT(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. |
+| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string with the concatenated, comma-separated, non-null values from a group. Returns null when there are no non-null values. |
+| MAX | SELECT MAX(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the largest value in a specified column. |
+| MIN | SELECT MIN(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the smallest value in a specified column. |
+| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
+| ARRAY* | ARRAY(_expression_) | Returns a list of data as a field. |
+| DISTINCT_ARRAY* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |
+
+*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).
+
+### Conversion
+
+| Keyword | Syntax | Description |
+| ------- | ------------------------------------------------ | ----------- |
+| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
+| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |
+
+### Date & Time
+
+| Keyword | Syntax | Description |
+| ----------------- | ---------------------------------------- | ----------- |
+| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
+| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” String format. |
+| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, see [SQL Date Functions](./date-functions). |
+| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ String format. If a date_string is not provided, the function will return the current UTC date/time value. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, see [SQL Date Functions](./date-functions). |
+| DATE_SUB | DATE_SUB(_date, value, interval_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, see [SQL Date Functions](./date-functions). |
+| DAY | DAY(_date_) | Returns the day of the month for the given date. |
+| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
+| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. For more information, see [SQL Date Functions](./date-functions). |
+| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
+| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
+| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. |
+| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
+| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. |
+| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. |
+| MONTH | MONTH(_date_) | Returns the month part for a specified date in range of 1 to 12. |
+| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. |
+| YEAR | YEAR(_date_) | Returns the year part for a specified date. |
+
+### Logical
+
+| Keyword | Syntax | Description |
+| ------- | ------------------------------------------------ | ----------- |
+| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
+| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2; if not equal, returns expression_1. |
+
+### Mathematical
+
+| Keyword | Syntax | Description |
+| ------- | -------------------------------- | ----------- |
+| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
+| CEIL | CEIL(_number_) | Returns the integer ceiling, the smallest integer value that is bigger than or equal to a given number. |
+| EXP | EXP(_number_) | Returns e to the power of a specified number. |
+| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
+| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. |
+| ROUND | ROUND(_number, decimal_places_) | Rounds a given number to a specified number of decimal places. |
+| SQRT | SQRT(_expression_) | Returns the square root of an expression. |
+
+### String
+
+| Keyword | Syntax | Description |
+| ----------- | ------------------------------------------------------------------------------------- | ----------- |
+| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
+| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
+| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
+| LEN | LEN(_string_) | Returns the length of a string. |
+| LOWER | LOWER(_string_) | Converts a string to lower-case. |
+| REGEXP | SELECT _column_name_ FROM _database.table_ WHERE _column_name_ REGEXP _pattern_ | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REGEXP_LIKE | SELECT _column_name_ FROM _database.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches column for matching string against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string with new_string. |
+| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified amount of characters from a string. |
+| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
+| UPPER | UPPER(_string_) | Converts a string to upper-case. |
+
+## Operators
+
+### Logical Operators
+
+| Keyword | Syntax | Description |
+| ------- | -------------------------------------------------------------------------------------------------- | ----------- |
+| BETWEEN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | (Inclusive) Returns values (numbers, text, or dates) within a given range. |
+| IN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
+| LIKE | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_n_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |
+
+## Queries
+
+### General
+
+| Keyword | Syntax | Description |
+| -------- | --------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
+| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _database.table_ | Returns only unique values, eliminating duplicate records. |
+| FROM | FROM _database.table_ | Used to list the database(s), table(s), and any joins required for a SQL statement. |
+| GROUP BY | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
+| HAVING | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
+| SELECT | SELECT _column_name(s)_ FROM _database.table_ | Selects data from table. |
+| WHERE | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ | Extracts records based on a defined condition. |
+
+### Joins
+
+| Keyword | Syntax | Description |
+| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
+| CROSS JOIN | SELECT _column_name(s)_ FROM _database.table_1_ CROSS JOIN _database.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
+| FULL OUTER | SELECT _column_name(s)_ FROM _database.table_1_ FULL OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
+| [INNER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ INNER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Returns only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
+| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ LEFT OUTER JOIN _database.table_2_ ON _table_1.column_name_ _= table_2.column_name_ | Returns all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
+| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ RIGHT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |
+
+### Predicates
+
+| Keyword | Syntax | Description |
+| ----------- | ------------------------------------------------------------------------------ | --------------------------- |
+| IS NOT NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
+| IS NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NULL | Tests for null values. |
+
+### Statements
+
+| Keyword | Syntax | Description |
+| ------- | ----------------------------------------------------------------------------------------------- | ----------- |
+| DELETE | DELETE FROM _database.table_ WHERE _condition_ | Deletes existing data from a table. |
+| INSERT | INSERT INTO _database.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. |
+| UPDATE | UPDATE _database.table_ SET _column_1 = value_1, column_2 = value_2, ..._ WHERE _condition_ | Alters existing records in a table. |
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/index.md b/site/versioned_docs/version-4.4/developers/sql-guide/index.md
new file mode 100644
index 00000000..941be5d0
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/index.md
@@ -0,0 +1,88 @@
+---
+title: SQL Guide
+---
+
+# SQL Guide
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+## Harper SQL Guide
+
+The purpose of this guide is to describe the available functionality of Harper as it relates to supported SQL functionality. The SQL parser is still actively being developed; many SQL features may not be optimized or utilize indexes. This document will be updated as more features and functionality become available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying and for utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](./features-matrix)**.**
+
+Harper adheres to the concept of databases & tables. This allows developers to isolate table structures from each other all within one database.
+
+## Select
+
+Harper has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering.
+
+All results are returned as JSON object arrays.
+
+Query for all records and attributes in the dev.dog table:
+
+```
+SELECT * FROM dev.dog
+```
+
+Query specific columns from all rows in the dev.dog table:
+
+```
+SELECT id, dog_name, age FROM dev.dog
+```
+
+Query for all records and attributes in the dev.dog table, ordered by age in ascending order:
+
+```
+SELECT * FROM dev.dog ORDER BY age
+```
+
+_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._
+
+## Insert
+
+Harper supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. Harper does not support selecting from one table to insert into another at this time.
+
+```
+INSERT INTO dev.dog (id, dog_name, age, breed_id)
+    VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
+```
+
+## Update
+
+Harper supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time selecting from one table to update another is not supported.
+
+```
+UPDATE dev.dog
+    SET owner_name = 'Kyle'
+    WHERE id IN (1, 2)
+```
+
+## Delete
+
+Harper supports deleting records from a table with condition support.
+
+```
+DELETE FROM dev.dog
+    WHERE age < 4
+```
+
+## Joins
+
+Harper allows developers to join any number of tables and currently supports the following join types:
+
+* INNER JOIN
+* LEFT INNER JOIN
+* LEFT OUTER JOIN
+
+Here’s a basic example joining two tables from our Get Started example – joining a dogs table with a breeds table:
+
+```
+SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
+    FROM dev.dog AS d
+    INNER JOIN dev.breed AS b ON d.breed_id = b.id
+    WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
+    AND b.section = 'Mutt'
+    ORDER BY d.dog_name
+```
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/json-search.md b/site/versioned_docs/version-4.4/developers/sql-guide/json-search.md
new file mode 100644
index 00000000..0727b07f
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/json-search.md
@@ -0,0 +1,177 @@
+---
+title: SQL JSON Search
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL JSON Search
+
+Harper automatically indexes all top-level attributes in a row / object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, Harper offers a special SQL function called SEARCH\_JSON. The SEARCH\_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](http://docs.jsonata.org/overview.html) into our SQL engine.
+
+## Syntax
+
+SEARCH\_JSON(_expression, attribute_)
+
+Executes the supplied string _expression_ against the data of the defined top-level _attribute_ for each row. The expression both filters and defines output from the JSON document.
+
+### Example 1
+
+#### Search a string array
+
+Here are two records in the database:
+
+```json
+[
+    {
+        "id": 1,
+        "name": ["Harper", "Penny"]
+    },
+    {
+        "id": 2,
+        "name": ["Penny"]
+    }
+]
+```
+
+Here is a simple query that gets any record with "Harper" found in the name:
+
+```
+SELECT *
+FROM dev.dog
+WHERE search_json('"Harper" in *', name)
+```
+
+### Example 2
+
+The purpose of this query is to give us every movie where at least two of our favorite actors from Marvel films have acted together. The results will return the movie title, the overview, the release date, and an object array of each actor’s name and their character name in the movie.
+
+Both function calls evaluate the credits.cast attribute; this attribute is an object array of every cast member in a movie.
+
+```
+SELECT m.title,
+       m.overview,
+       m.release_date,
+       SEARCH_JSON($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]].{"actor": name, "character": character}, c.`cast`) AS characters
+FROM movies.credits c
+    INNER JOIN movies.movie m
+        ON c.movie_id = m.id
+WHERE SEARCH_JSON($count($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2
+```
+
+A sample of this data from the movie The Avengers looks like:
+
+```json
+[
+    {
+        "cast_id": 46,
+        "character": "Tony Stark / Iron Man",
+        "credit_id": "52fe4495c3a368484e02b251",
+        "gender": "male",
+        "id": 3223,
+        "name": "Robert Downey Jr.",
+        "order": 0
+    },
+    {
+        "cast_id": 2,
+        "character": "Steve Rogers / Captain America",
+        "credit_id": "52fe4495c3a368484e02b19b",
+        "gender": "male",
+        "id": 16828,
+        "name": "Chris Evans",
+        "order": 1
+    },
+    {
+        "cast_id": 307,
+        "character": "Bruce Banner / The Hulk",
+        "credit_id": "5e85e8083344c60015411cfa",
+        "gender": "male",
+        "id": 103,
+        "name": "Mark Ruffalo",
+        "order": 2
+    }
+]
+```
+
+Let’s break down the SEARCH\_JSON function call in the SELECT:
+
+```
+SEARCH_JSON(
+    $[name in [
+        "Robert Downey Jr.",
+        "Chris Evans",
+        "Scarlett Johansson",
+        "Mark Ruffalo",
+        "Chris Hemsworth",
+        "Jeremy Renner",
+        "Clark Gregg",
+        "Samuel L. Jackson",
+        "Gwyneth Paltrow",
+        "Don Cheadle"
+    ]].{
+        "actor": name,
+        "character": character
+    },
+    c.`cast`
+)
+```
+
+The first argument passed to SEARCH\_JSON is the expression to execute against the second argument, which is the cast attribute on the credits table. This expression will execute for every row. Looking at the expression, it starts with “$\[…]”; this tells the function to iterate all elements of the cast array.
+
+Then the expression tells the function to only return entries where the name attribute matches any of the actors defined in the array:
+
+```
+name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]
+```
+
+So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{“actor”: name, “character”: character}`. This tells the function to create a specific object for each matching entry.
+
+**Sample Result**
+
+```json
+[
+    {
+        "actor": "Robert Downey Jr.",
+        "character": "Tony Stark / Iron Man"
+    },
+    {
+        "actor": "Chris Evans",
+        "character": "Steve Rogers / Captain America"
+    },
+    {
+        "actor": "Mark Ruffalo",
+        "character": "Bruce Banner / The Hulk"
+    }
+]
+```
+
+Just having the SEARCH\_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors. In order to filter out the movies we do not want, we also use SEARCH\_JSON in the WHERE clause.
+
+This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT:
+
+```
+SEARCH_JSON(
+    $count(
+        $[name in [
+            "Robert Downey Jr.",
+            "Chris Evans",
+            "Scarlett Johansson",
+            "Mark Ruffalo",
+            "Chris Hemsworth",
+            "Jeremy Renner",
+            "Clark Gregg",
+            "Samuel L. Jackson",
+            "Gwyneth Paltrow",
+            "Don Cheadle"
+        ]]
+    ),
+    c.`cast`
+) >= 2
+```
+
+As seen above, we execute the same name filter against the cast array; the primary difference is that we wrap the filtered results in $count(…). This returns a count of the matching results, which we then use against our SQL comparator of >= 2.
+
+To see further SEARCH\_JSON examples in action, view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples).
+
+To learn more about how to build expressions, check out the JSONata documentation: [http://docs.jsonata.org/overview](http://docs.jsonata.org/overview)
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/reserved-word.md b/site/versioned_docs/version-4.4/developers/sql-guide/reserved-word.md
new file mode 100644
index 00000000..8ce9f025
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/reserved-word.md
@@ -0,0 +1,207 @@
+---
+title: Harper SQL Reserved Words
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# Harper SQL Reserved Words
+
+This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a database, table, or attribute must have backticks (`…`) or brackets ([…]) around the name.
+
+For example, for a table called `ASSERT` in the `data` database, a SQL select on that table would look like:
+
+```
+SELECT * from data.`ASSERT`
+```
+
+Alternatively:
+
+```
+SELECT * from data.[ASSERT]
+```
+
+### RESERVED WORD LIST
+
+* ABSOLUTE
+* ACTION
+* ADD
+* AGGR
+* ALL
+* ALTER
+* AND
+* ANTI
+* ANY
+* APPLY
+* ARRAY
+* AS
+* ASSERT
+* ASC
+* ATTACH
+* AUTOINCREMENT
+* AUTO_INCREMENT
+* AVG
+* BEGIN
+* BETWEEN
+* BREAK
+* BY
+* CALL
+* CASE
+* CAST
+* CHECK
+* CLASS
+* CLOSE
+* COLLATE
+* COLUMN
+* COLUMNS
+* COMMIT
+* CONSTRAINT
+* CONTENT
+* CONTINUE
+* CONVERT
+* CORRESPONDING
+* COUNT
+* CREATE
+* CROSS
+* CUBE
+* CURRENT_TIMESTAMP
+* CURSOR
+* DATABASE
+* DECLARE
+* DEFAULT
+* DELETE
+* DELETED
+* DESC
+* DETACH
+* DISTINCT
+* DOUBLEPRECISION
+* DROP
+* ECHO
+* EDGE
+* END
+* ENUM
+* ELSE
+* EXCEPT
+* EXISTS
+* EXPLAIN
+* FALSE
+* FETCH
+* FIRST
+* FOREIGN
+* FROM
+* GO
+* GRAPH
+* GROUP
+* GROUPING
+* HAVING
+* HDB_HASH
+* HELP
+* IF
+* IDENTITY
+* IS
+* IN
+* INDEX
+* INNER
+* INSERT
+* INSERTED
+* INTERSECT
+* INTO
+* JOIN
+* KEY
+* LAST
+* LET
+* LEFT
+* LIKE
+* LIMIT
+* LOOP
+* MATCHED
+* MATRIX
+* MAX
+* MERGE
+* MIN
+* MINUS
+* MODIFY
+* NATURAL
+* NEXT
+* NEW
+* NOCASE
+* NO
+* NOT
+* NULL
+* OFF
+* ON
+* ONLY
+* OFFSET
+* OPEN
+* OPTION
+* OR
+* ORDER
+* OUTER
+* OVER
+* PATH
+* PARTITION
+* PERCENT
+* PLAN
+* PRIMARY
+* PRINT
+* PRIOR
+* QUERY
+* READ
+* RECORDSET
+* REDUCE
+* REFERENCES
+* RELATIVE
+* REPLACE
+* REMOVE
+* RENAME
+* REQUIRE
+* RESTORE
+* RETURN
+* RETURNS
+* RIGHT
+* ROLLBACK
+* ROLLUP
+* ROW
+* SCHEMA
+* SCHEMAS
+* SEARCH
+* SELECT
+* SEMI
+* SET
+* SETS
+* SHOW
+* SOME
+* SOURCE
+* STRATEGY
+* STORE
+* SYSTEM
+* SUM
+* TABLE
+* TABLES
+* TARGET
+* TEMP
+* TEMPORARY
+* TEXTSTRING
+* THEN
+* TIMEOUT
+* TO
+* TOP
+* TRAN
+* TRANSACTION
+* TRIGGER
+* TRUE
+* TRUNCATE
+* UNION
+* UNIQUE
+* UPDATE
+* USE
+* USING
+* VALUE
+* VERTEX
+* VIEW
+* WHEN
+* WHERE
+* WHILE
+* WITH
+* WORK
diff --git a/site/versioned_docs/version-4.4/developers/sql-guide/sql-geospatial-functions.md b/site/versioned_docs/version-4.4/developers/sql-guide/sql-geospatial-functions.md
new file mode 100644
index 00000000..17ea789a
--- /dev/null
+++ b/site/versioned_docs/version-4.4/developers/sql-guide/sql-geospatial-functions.md
@@ -0,0 +1,384 @@
+---
+title: SQL Geospatial Functions
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL Geospatial Functions
+
+Harper geospatial features require data to be stored in a single column using the [GeoJSON standard](http://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements.
+
+If you are new to GeoJSON you should check out the full specification here: http://geojson.org/. There are a few important things to point out before getting started.
+
+1) All GeoJSON coordinates are stored in `[longitude, latitude]` format.
+2) Coordinates or GeoJSON geometries must be passed as strings when written directly in a SQL statement.
+3) Note if you are using Postman for your testing: due to limitations in the Postman client, you will need to escape quotes in your strings, and your SQL will need to be passed on a single line.
+
+In the examples below, database and table names may change, but all GeoJSON data will be stored in a column named `geo_data`.
+
+# geoArea
+
+The `geoArea()` function returns the area of one or more features in square meters.
+
+## Syntax
+geoArea(_geoJSON_)
+
+## Parameters
+| Parameter | Description |
+|-----------|---------------------------------|
+| geoJSON | Required. One or more features. |
+
+### Example 1
+Calculate the area, in square meters, of a manually passed GeoJSON polygon.
+
+```
+SELECT geoArea('{
+	"type":"Feature",
+	"geometry":{
+		"type":"Polygon",
+		"coordinates":[[
+			[0,0],
+			[0.123456,0],
+			[0.123456,0.123456],
+			[0,0.123456]
+		]]
+	}
+}')
+```
+
+### Example 2
+Find all records that have an area less than 1 square mile (or 2589988 square meters).
+
+```
+SELECT * FROM dev.locations
+WHERE geoArea(geo_data) < 2589988
+```
+
+# geoLength
+Takes a GeoJSON and measures its length in the specified units (default is kilometers).
+
+## Syntax
+geoLength(_geoJSON_[_, units_])
+
+## Parameters
+| Parameter | Description |
+|------------|-----------------------------------------------------------------------------------------------------------------------|
+| geoJSON | Required. GeoJSON to measure. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+Calculate the length, in kilometers, of a manually passed GeoJSON linestring.
+
+```
+SELECT geoLength('{
+	"type": "Feature",
+	"geometry": {
+		"type": "LineString",
+		"coordinates": [
+			[-104.97963309288025,39.76163265441438],
+			[-104.9823260307312,39.76365323407955],
+			[-104.99193906784058,39.75616442110704]
+		]
+	}
+}')
+```
+
+### Example 2
+Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of lengths smallest to largest.
+
+```
+SELECT *, geoLength(geo_data, 'miles') as length
+FROM dev.locations
+WHERE geoLength(geo_data, 'miles') < 5
+ORDER BY length ASC
+```
+
+# geoDifference
+Returns a new polygon with the difference of the second polygon clipped from the first polygon.
+
+## Syntax
+geoDifference(_polygon1, polygon2_)
+
+## Parameters
+| Parameter | Description |
+|------------|----------------------------------------------------------------------------|
+| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| polygon2 | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1. |
+
+### Example
+Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_).
+
+```
+SELECT geoDifference('{
+	"type": "Feature",
+	"properties": {
+		"name":"Colorado"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-109.072265625,37.00255267215955],
+			[-102.01904296874999,37.00255267215955],
+			[-102.01904296874999,41.0130657870063],
+			[-109.072265625,41.0130657870063],
+			[-109.072265625,37.00255267215955]
+		]]
+	}
+	}',
+	'{
+	"type": "Feature",
+	"properties": {
+		"name":"City Park"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.95973110198975,39.7543828214657],
+			[-104.95955944061278,39.744781185675386],
+			[-104.95904445648193,39.74422022399989],
+			[-104.95835781097412,39.74402223643582],
+			[-104.94097709655762,39.74392324244047],
+			[-104.9408483505249,39.75434982844515],
+			[-104.95973110198975,39.7543828214657]
+		]]
+	}
+	}'
+)
+```
+
+# geoDistance
+Calculates the distance between two points in units (default is kilometers).
+
+## Syntax
+geoDistance(_point1, point2_[_, units_])
+
+## Parameters
+| Parameter | Description |
+|------------|-----------------------------------------------------------------------------------------------------------------------|
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+Calculate the distance, in miles, between Harper’s headquarters and the Washington Monument.
+
+```
+SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles')
+```
+
+### Example 2
+Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in an ascending order.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoDistance('[-104.979127,39.761563]', geo_data, 'kilometers') < 40
+ORDER BY distance ASC
+```
+
+# geoNear
+Determines if point1 and point2 are within a specified distance from each other; default units are kilometers. Returns a Boolean.
+
+## Syntax
+geoNear(_point1, point2, distance_[_, units_])
+
+## Parameters
+| Parameter | Description |
+|------------|-----------------------------------------------------------------------------------------------------------------------|
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| distance | Required. The maximum distance in units as an integer or decimal. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+Return all locations within 50 miles of a given point.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles')
+```
+
+### Example 2
+Return all locations within 2 degrees of a given point (each degree of latitude/longitude is about 69 miles, or 111 kilometers). Return all data and the distance in miles, sorted by ascending distance.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 2, 'degrees')
+ORDER BY distance ASC
+```
+
+# geoContains
+Determines if geo2 is completely contained by geo1. Returns a Boolean.
+
+## Syntax
+geoContains(_geo1, geo2_)
+
+## Parameters
+| Parameter | Description |
+|------------|-------------------------------------------------------------------------------------|
+| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. |
+
+### Example 1
+Return all locations within the state of Colorado (passed as a GeoJSON string).
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains('{
+	"type": "Feature",
+	"properties": {
+		"name":"Colorado"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-109.072265625,37.00255267],
+			[-102.01904296874999,37.00255267],
+			[-102.01904296874999,41.01306579],
+			[-109.072265625,41.01306579],
+			[-109.072265625,37.00255267]
+		]]
+	}
+}', geo_data)
+```
+
+### Example 2
+Return all locations which contain Harper Headquarters.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains(geo_data, '{
+	"type": "Feature",
+	"properties": {
+		"name": "Harper Headquarters"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.98060941696167,39.760704817357905],
+			[-104.98053967952728,39.76065120861263],
+			[-104.98055577278137,39.760642961109674],
+			[-104.98037070035934,39.76049450588716],
+			[-104.9802714586258,39.76056254790385],
+			[-104.9805235862732,39.76076461167841],
+			[-104.98060941696167,39.760704817357905]
+		]]
+	}
+}')
+```
+
+# geoEqual
+Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean.
+
+## Syntax
+geoEqual(_geo1_, _geo2_)
+
+## Parameters
+| Parameter | Description |
+|------------|----------------------------------------|
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+Find Harper Headquarters among all the locations in the database.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoEqual(geo_data, '{
+	"type": "Feature",
+	"properties": {
+		"name": "Harper Headquarters"
+	},
+	"geometry": {
+		"type": "Polygon",
+		"coordinates": [[
+			[-104.98060941696167,39.760704817357905],
+			[-104.98053967952728,39.76065120861263],
+			[-104.98055577278137,39.760642961109674],
+			[-104.98037070035934,39.76049450588716],
+			[-104.9802714586258,39.76056254790385],
+			[-104.9805235862732,39.76076461167841],
+			[-104.98060941696167,39.760704817357905]
+		]]
+	}
+}')
+```
+
+# geoCrosses
+Determines if the geometries cross over each other. Returns a Boolean.
+
+## Syntax
+geoCrosses(_geo1, geo2_)
+
+## Parameters
+| Parameter | Description |
+|------------|-----------------------------------------|
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+Find all locations that cross over a highway.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoCrosses(
+	geo_data,
+	'{
+		"type": "Feature",
+		"properties": {
+			"name": "Highway I-25"
+		},
+		"geometry": {
+			"type": "LineString",
+			"coordinates": [
+				[-104.9139404296875,41.00477542222947],
+				[-105.0238037109375,39.715638134796336],
+				[-104.853515625,39.53370327008705],
+				[-104.853515625,38.81403111409755],
+				[-104.61181640625,38.39764411353178],
+				[-104.8974609375,37.68382032669382],
+				[-104.501953125,37.00255267215955]
+			]
+		}
+	}'
+)
+```
+
+# geoConvert
+
+Converts a series of coordinates into a GeoJSON of the specified type.
+
+## Syntax
+geoConvert(_coordinates, geo_type_[, _properties_])
+
+## Parameters
+| Parameter | Description |
+|--------------|------------------------------------------------------------------------------------------------------------------------------------|
+| coordinates | Required. One or more coordinates. |
+| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’. |
+| properties | Optional. Escaped JSON object with properties to be added to the GeoJSON output. |
+
+### Example
+Convert a given coordinate into a GeoJSON point with specified properties.
+
+```
+SELECT geoConvert(
+	'[-104.979127,39.761563]',
+	'point',
+	'{
+		"name": "Harper Headquarters"
+	}'
+)
+```
diff --git a/site/versioned_docs/version-4.4/getting-started.md b/site/versioned_docs/version-4.4/getting-started.md
new file mode 100644
index 00000000..77daf75b
--- /dev/null
+++ b/site/versioned_docs/version-4.4/getting-started.md
@@ -0,0 +1,84 @@
+---
+title: Getting Started
+---
+
+# Getting Started
+
+Harper is designed for quick and simple setup and deployment, with smart defaults that lead to fast, scalable, and globally distributed database applications.
+
+You can easily create a Harper database in the cloud through our studio or install it locally. The quickest way to get Harper up and running is with [Harper Cloud](./deployments/harper-cloud/), our database-as-a-service offering. However, Harper is a [database application platform](./developers/applications/), and to leverage Harper’s full application development capabilities (defining schemas, endpoints, messaging, and gateways), you may wish to install and run Harper locally so that you can use your standard local IDE tools, debugging, and version control.
+
+### Installing a Harper Instance
+
+You can simply install Harper with npm (or yarn, or other package managers):
+
+```shell
+npm install -g harperdb
+```
+
+Here we installed Harper globally (and we recommend this) to make it easy to run a single Harper instance with multiple projects, but you can install it locally (not globally) as well.
+
+You can run Harper by running:
+
+```shell
+harperdb
+```
+
+You can now use Harper as a standalone database. You can also create a cloud instance (see below), which is also an easy way to get started.
+
+#### Developing Database Applications with Harper
+
+Harper is more than just a database: with Harper you build "database applications" which package your schema, endpoints, and application logic together. You can then deploy your application to an entire cluster of Harper instances, ready to scale to on-the-edge delivery of data and application endpoints directly to your users. To get started with Harper, take a look at our application development guide, with quick and easy examples:
+
+[Database application development guide](./developers/applications/)
+
+### Setting up a Cloud Instance
+
+To set up a Harper cloud instance, simply sign up and create a new instance:
+
+1. [Sign up for the Harper Studio](https://studio.harperdb.io/sign-up)
+1. Create a new Harper Cloud instance
+
+Note that a local instance and cloud instance are not mutually exclusive. You can register your local instance in the Harper Studio, and a common development flow is to develop locally and then deploy your application to your cloud instance.
+
+Harper Cloud instance provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready.
+
+#### Using the Harper Studio
+
+Now that you have a Harper instance, if you want to use Harper as a standalone database, you can fully administer and interact with your database through the Studio. This section links to appropriate articles to get you started interacting with your data.
+
+1. Create a database
+1. Create a table
+1. Add a record
+1. Load CSV data (Here’s a sample CSV of the Harper team’s dogs)
+1. Browse data
+
+## Administering Harper
+
+If you are deploying and administering Harper, you may want to look at our [configuration documentation](./deployments/configuration) and our administrative operations API below.
+
+### Harper APIs
+
+The preferred way to interact with Harper for typical querying, accessing, and updating data (CRUD) operations is through the REST interface, described in the [REST documentation](./developers/rest).
+
+The Operations API provides extensive administrative capabilities for Harper, and the [Operations API documentation has usage and examples](./developers/operations-api/). Generally it is recommended that you use the RESTful interface as your primary interface for performant data access, querying, and manipulation (DML) when building production applications (under heavy load), and the operations API (and SQL) for data definition (DDL) and administrative purposes.
+
+The Harper Operations API is a single endpoint, which means the only thing that needs to change across different calls is the body. For example purposes, a basic cURL command is shown below to create a database called dev. To change this behavior, swap out the operation in the `data-raw` body parameter.
+
+```
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+	"operation": "create_database",
+	"database": "dev"
+}'
+```
+
+## Support and Learning More
+
+If you find yourself in need of additional support you can submit a [Harper support ticket](https://harperdbhelp.zendesk.com/hc/en-us/requests/new). You can also learn more about available Harper projects by searching [GitHub](https://github.com/search?q=harperdb).
+
+### Video Tutorials
+
+[Harper video tutorials are available on our YouTube channel](https://www.youtube.com/@harperdbio). Harper and the Harper Studio are constantly changing; as such, there may be small discrepancies in UI/UX.
diff --git a/site/versioned_docs/version-4.4/index.md b/site/versioned_docs/version-4.4/index.md
new file mode 100644
index 00000000..3812e62f
--- /dev/null
+++ b/site/versioned_docs/version-4.4/index.md
@@ -0,0 +1,154 @@
+---
+title: Harper Docs
+---
+
+# Harper Docs
+
+:::info
+[Connect with our team!](https://www.harpersystems.dev/contact)
+:::
+
+## What is Harper? Performance, Simplicity, and Scale.
+
+Harper is an all-in-one backend technology that fuses database technologies, caching, application hosting, and messaging functions into a single system. Unlike traditional architectures where each piece runs independently and incurs extra costs and latency from serialization and network operations between processes, Harper systems can handle workloads seamlessly and efficiently.
+
+Harper simplifies scaling with clustering and native data replication. At scale, architectures tend to include 4 to 16 redundant, geo-distributed nodes located near every user population center.
+This ensures that every user experiences minimal network latency and maximum reliability in addition to the already rapid server responses.
+
+![](/img/v4.4/harperstack.jpg)
+
+## Understanding the Paradigm Shift
+
+Have you ever combined MongoDB with Redis, Next.js with Postgres, or perhaps Fastify with anything else? The options seem endless. It turns out that the cost of serialization, network hops, and intermediary processes in these systems often adds up to 50% or more of the total system resources used, not to mention the hundreds of milliseconds of latency they can add.
+
+What we realized is that networking systems together in this way is inefficient and only necessary because a fused technology did not exist. So, we built Harper, a database fused with a complete JavaScript application system. It’s not only orders of magnitude more performant than separated systems, but it’s also easier to deploy and manage at scale.
+
+## Build With Harper
+
+Start by running Harper locally with [npm](https://www.npmjs.com/package/harperdb) or [Docker](https://hub.docker.com/r/harperdb/harperdb).
+
+Since technology tends to be built around the storage, processing, and transfer of data, start by [defining your schema](./developers/applications/#creating-our-first-table) with the `schema.graphql` file in the root of the application directory.
+
+If you would like to [query](./developers/applications/#adding-an-endpoint) this data, add the `@export` directive to your data schema and test out the [REST](./developers/rest), [MQTT](./developers/real-time#mqtt), or [WebSocket](./developers/real-time#websockets) endpoints.
+
+When you are ready for something a little more advanced, start [customizing your application](./developers/applications/#custom-functionality-with-javascript).
+
+Finally, when it’s time to deploy, explore [replication](./developers/replication/) between nodes.
+
+If you would like to jump into the most advanced capabilities, learn about [components](./developers/components/).
+
+For a more comprehensive deep dive, take a look at our [Getting Started Guide](./getting-started).
+
+:::warning
+Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
+:::
+
+## Popular Use Cases
+
+With so much functionality built in, the use cases span nearly all application systems. Some of the most popular are listed below, motivated by new levels of performance and system simplicity.
+
+### Online Catalogs & Content Delivery
+
+For use cases like e-commerce, real estate listings, and content-oriented sites, Harper’s breakthroughs in performance and distribution pay dividends in the form of better SEO and higher conversion rates. One common implementation leverages Harper’s [Next.js Component](https://github.com/HarperDB/nextjs) to host modern, performant frontend applications. Other implementations leverage the built-in caching layer and JavaScript application system to [server-side render pages](https://www.harpersystems.dev/development/tutorials/server-side-rendering-with-multi-tier-cache) that remain fully responsive because of built-in WebSocket connections.
+
+### Data Delivery Networks
+
+For use cases like real-time sports updates, flight tracking, and zero-day software update distribution, Harper is rapidly gaining popularity. Harper’s ability to receive and broadcast messages while simultaneously handling application logic and data storage streamlines operations and eliminates the need for multiple separate systems. To build an understanding of how our messaging system functions, refer to our [real-time documentation](./developers/real-time).
+
+### Edge Inference Systems
+
+Capturing, storing, and processing real-time data streams from client and IoT systems typically requires a stack of technology. Harper’s selective data replication and self-healing connections make for an ideal multi-tier system where edge and cloud systems both run Harper, making everything more performant.
+
+[We’re happy](https://www.harpersystems.dev/contact) to walk you through how to do this.
+
+## Getting Started
+
+- **Getting Started Guide**: Get up and running with Harper.
+- **Quick Install Harper**: Run Harper on your own hardware.
+- **Try Harper Cloud**: Spin up an instance in minutes to get going fast.
+
+## Building with Harper
+
+- **Harper Applications**: Build a fully featured Harper Component with custom functionality.
+- **REST Queries**: The recommended HTTP interface for data access, querying, and manipulation.
+- **Operations API**: Configure, deploy, administer, and control your Harper instance.
+- **Clustering & Replication**: Connect multiple Harper databases together to create a database mesh network that enables users to define data replication patterns.
+- **Explore the Harper Studio**: The web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface.
diff --git a/site/versioned_docs/version-4.4/technical-details/_category_.json b/site/versioned_docs/version-4.4/technical-details/_category_.json new file mode 100644 index 00000000..69ce80a6 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Technical Details", + "position": 4, + "link": { + "type": "generated-index", + "title": "Technical Details Documentation", + "description": "Reference documentation and technical specifications", + "keywords": [ + "technical-details" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/reference/analytics.md b/site/versioned_docs/version-4.4/technical-details/reference/analytics.md new file mode 100644 index 00000000..d3156053 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/reference/analytics.md @@ -0,0 +1,117 @@ +--- +title: Analytics +--- + +# Analytics + +Harper provides extensive telemetry and analytics data to help monitor the status of the server and work loads, and to help understand traffic and usage patterns to identify issues and scaling needs, and identify queries and actions that are consuming the most resources. + +Harper collects statistics for all operations, URL endpoints, and messaging topics, aggregating information by thread, operation, resource, and methods, in real-time. These statistics are logged in the `hdb_raw_analytics` and `hdb_analytics` table in the `system` database. + +There are two "levels" of analytics in the Harper analytics table: the first is the immediate level of raw direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search\_by\_conditions operation (this will search for 10 seconds worth of analytics) on the `hdb_raw_analytics` table: + +``` +POST http:/localhost:9925 +Content-Type: application/json + +{ + "operation": "search_by_conditions", + "schema": "system", + "table": "hdb_raw_analytics", + "conditions": [{ + "search_attribute": "id", + "search_type": "between", + "search_value": [168859400000, 1688594010000] + }] +} +``` + +And a typical response looks like: + +``` +{ + "time": 1688594390708, + "period": 1000.8336279988289, + "metrics": [ + { + "metric": "bytes-sent", + "path": "search_by_conditions", + "type": "operation", + "median": 202, + "mean": 202, + "p95": 202, + "p90": 202, + "count": 1 + }, + ... + { + "metric": "memory", + "threadId": 2, + "rss": 1492664320, + "heapTotal": 124596224, + "heapUsed": 119563120, + "external": 3469790, + "arrayBuffers": 798721 + }, + { + "metric": "utilization", + "idle": 138227.52767700003, + "active": 70.5066209952347, + "utilization": 0.0005098165086230495 + } + ], + "threadId": 2, + "totalBytesProcessed": 12182820, + "id": 1688594390708.6853 +} +``` + +The second level of analytics recording is aggregate data. The aggregate records are recorded once a minute, and aggregate the results from all the per-second entries from all the threads, creating a summary of statistics once a minute. The ids for these milliseconds since epoch can be queried from the `hdb_analytics` table. 
+You can query these with an operation like:
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+	"operation": "search_by_conditions",
+	"schema": "system",
+	"table": "hdb_analytics",
+	"conditions": [{
+		"search_attribute": "id",
+		"search_type": "between",
+		"search_value": [1688194100000, 1688594990000]
+	}]
+}
+```
+
+And a summary record looks like:
+
+```
+{
+	"period": 60000,
+	"metric": "bytes-sent",
+	"method": "connack",
+	"type": "mqtt",
+	"median": 4,
+	"mean": 4,
+	"p95": 4,
+	"p90": 4,
+	"count": 1,
+	"id": 1688589569646,
+	"time": 1688589569646
+}
```
+
+The following are general resource usage statistics that are tracked:
+
+* memory - This includes RSS, heap, buffer, and external data usage.
+* utilization - How much of the time the worker was processing requests.
+* mqtt-connections - The number of MQTT connections.
+
+The following types of information are tracked for each HTTP request:
+
+* success - How many requests returned a successful response (20x response code).
+* TTFB - Time to first byte in the response to the client.
+* transfer - Time to finish the transfer of the data to the client.
+* bytes-sent - How many bytes of data were sent to the client.
+
+Requests are categorized by operation name for the operations API, by resource name for the REST API, and by command for the MQTT interface.
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/architecture.md b/site/versioned_docs/version-4.4/technical-details/reference/architecture.md
new file mode 100644
index 00000000..dd451ded
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/architecture.md
@@ -0,0 +1,42 @@
+---
+title: Architecture
+---
+
+# Architecture
+
+Harper's architecture consists of resources, which include tables and user-defined data sources and extensions, and server interfaces, which include the RESTful HTTP interface, operations API, and MQTT. Servers are supported by routing and auth services.
+
+```
+      ┌──────────┐              ┌──────────┐
+      │ Clients  │              │ Clients  │
+      └────┬─────┘              └────┬─────┘
+           │                         │
+           ▼                         ▼
+┌────────────────────────────────────────┐
+│                                        │
+│       Socket routing/management        │
+├───────────────────────┬────────────────┤
+│                       │                │
+│  Server Interfaces  ─►│ Authentication │
+│  RESTful HTTP, MQTT   │ Authorization  │
+│                     ◄─┤                │
+│      ▲                └────────────────┤
+│      │        ▲                        │
+├──────┼────────┼────────────────────────┤
+│      │        │                 ▲      │
+│      ▼        │  Resources      │      │
+│               │           ┌───────────┐│
+├───────────────┴──────┐    │    App    ││
+│                      ├───►│ resources ││
+│   Database tables    │    └───────────┘│
+│                      │          ▲      │
+├──────────────────────┘          │      │
+│      ▲     ┌────────────────┐   │      │
+│      │     │    External    │   │      │
+│      └─────┤  data sources  ├───┘      │
+│            │                │          │
+│            └────────────────┘          │
+│                                        │
+└────────────────────────────────────────┘
+```
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/content-types.md b/site/versioned_docs/version-4.4/technical-details/reference/content-types.md
new file mode 100644
index 00000000..735b268d
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/content-types.md
@@ -0,0 +1,29 @@
+---
+title: Content Types
+---
+
+# Content Types
+
+Harper supports several different content types (or MIME types) both for HTTP request bodies (describing operations) and for serializing content into HTTP response bodies. Harper follows HTTP standards for specifying both request body content types and acceptable response body content types.
+Any of these content types can be used with any of the standard Harper operations.
+
+For request body content, the content type should be specified with the `Content-Type` header. For example, with JSON, use `Content-Type: application/json`, and for CBOR, include `Content-Type: application/cbor`. To request that the response body be encoded with a specific content type, use the `Accept` header. If you want the response to be in JSON, use `Accept: application/json`. If you want the response to be in CBOR, use `Accept: application/cbor`.
+
+The following content types are supported:
+
+## JSON - application/json
+
+JSON is the most widely used content type, and is relatively readable and easy to work with. However, JSON does not support all the data types that are supported by Harper, and can't be used to natively encode data types like binary data or explicit Maps/Sets. Also, JSON is not as efficient as binary formats. When using JSON, compression is recommended (this also follows standard HTTP protocol with the `Accept-Encoding` header) to improve network transfer performance (although there is server performance overhead). JSON is a good choice for web development when standard JSON types are sufficient, when it is combined with compression, and when debuggability/observability is important.
+
+## CBOR - application/cbor
+
+CBOR is a highly efficient binary format, and is a recommended format for most production use cases with Harper. CBOR supports the full range of Harper data types, including binary data, typed dates, and explicit Maps/Sets. CBOR is very performant and space efficient even without compression. Compression will still yield better network transfer size/performance, but compressed CBOR is generally not any smaller than compressed JSON. CBOR also natively supports streaming for optimal performance (using indefinite length arrays). The CBOR format has excellent standardization, and Harper's CBOR support provides an excellent balance of performance and size efficiency.
+
+## MessagePack - application/x-msgpack
+
+MessagePack is another efficient binary format like CBOR, with support for all Harper data types. MessagePack generally has wider adoption than CBOR and can be useful in systems that don't have (good) CBOR support. However, MessagePack does not have native support for streaming of arrays of data (for query results), and so query results are returned as a (concatenated) sequence of MessagePack objects/maps. MessagePack decoders used with Harper's MessagePack must be prepared to decode a direct sequence of MessagePack values to properly read responses.
+
+## Comma-separated Values (CSV) - text/csv
+
+Comma-separated values is an easy-to-use and easy-to-understand format that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available.
+
+In addition, with the REST interface, you can use file-style extensions to indicate an encoding, like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest) for more information on how to do this.
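+
+As a sketch of content negotiation in practice, the following hypothetical `fetch` call (the table name, record id, and use of the default HTTP port `9926` are assumptions for illustration) asks for CBOR-encoded output:
+
+```javascript
+// Request a record as CBOR rather than JSON; the Accept header drives serialization.
+const response = await fetch('http://localhost:9926/MyTable/some-id', {
+	headers: { Accept: 'application/cbor' },
+});
+const cborBytes = new Uint8Array(await response.arrayBuffer()); // raw CBOR bytes to decode client-side
+```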
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/data-types.md b/site/versioned_docs/version-4.4/technical-details/reference/data-types.md
new file mode 100644
index 00000000..2ae66e4b
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/data-types.md
@@ -0,0 +1,52 @@
+---
+title: Data Types
+---
+
+# Data Types
+
+Harper supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (Harper supports all JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. Harper supports MessagePack and CBOR, which allow for all of Harper's supported data types. [Schema definitions can specify the expected types for fields, with GraphQL Schema Types](../../developers/applications/defining-schemas), which are used for validation of incoming typed data (JSON, MessagePack) and for auto-conversion of untyped data (CSV, [query parameters](../../developers/rest)). Available data types include:
+
+(Note that these labels are descriptive; they do not necessarily correspond to the GraphQL schema type names, but the schema type names are noted where possible.)
+
+## Boolean
+
+`true` or `false`. The GraphQL schema type name is `Boolean`.
+
+## String
+
+Strings, or text, are a sequence of any Unicode characters and are internally encoded with UTF-8. The GraphQL schema type name is `String`.
+
+## Number
+
+Numbers can be stored as signed integers with up to 1000 bits of precision (about 300 digits) or as floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. With JSON, numbers are automatically parsed and stored in the most appropriate format. Custom components and applications may use BigInt numbers to store/access integers that are larger than 53 bits. The following GraphQL schema type names are supported:
+
+* `Float` - Any number that can be represented with a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) ("double")
+* `Int` - Any integer from -2147483648 to 2147483647
+* `Long` - Any integer from -9007199254740992 to 9007199254740992
+* `BigInt` - Any integer (negative or positive) with less than 300 digits
+
+Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately.
+
+## Object/Map
+
+Objects, or maps, that hold a set of named properties can be stored in Harper. When provided as JSON objects or JavaScript objects, all property keys are stored as strings. The order of properties is also preserved in Harper’s storage. Duplicate property keys are not allowed (they are dropped in parsing any incoming data).
+
+## Array
+
+Arrays hold an ordered sequence of values and can be stored in Harper. There is no support for sparse arrays, although you can use objects to store data with numbers (converted to strings) as properties.
+
+## Null
+
+A null value can be stored in Harper property values as well.
+
+## Date
+
+Dates can be stored as a specific data type. This is not supported in JSON, but is supported by MessagePack and CBOR. Custom Functions can also store and use Dates using JavaScript Date instances. The GraphQL schema type name is `Date`.
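+
+As a brief sketch (the table and field names here are hypothetical, not part of Harper), a custom component could write several of these types in one record; unlike with plain JSON, BigInt and Date values survive the round trip:
+
+```javascript
+import { tables } from 'harperdb';
+const { MyTable } = tables;
+
+// Store one record containing several distinct data types.
+await MyTable.put({
+	id: 'example',
+	count: 42, // Int
+	ratio: 0.5, // Float
+	big: 123456789012345678901234567890n, // BigInt (note the trailing n)
+	created: new Date(), // Date instance
+});
+```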
+
+## Binary Data
+
+Binary data can be stored in property values as well. JSON doesn’t have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in Harper. Custom Functions can also store binary data by using Node.js Buffer or Uint8Array instances to hold the binary data. The GraphQL schema type name is `Bytes`.
+
+## Explicit Map/Set
+
+Explicit instances of JavaScript Maps and Sets can be stored and preserved in Harper as well. This can’t be represented with JSON, but can be with CBOR.
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/dynamic-schema.md b/site/versioned_docs/version-4.4/technical-details/reference/dynamic-schema.md
new file mode 100644
index 00000000..05abd4d5
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/dynamic-schema.md
@@ -0,0 +1,148 @@
+---
+title: Dynamic Schema
+---
+
+# Dynamic Schema
+
+When tables are created without any schema, through the operations API (without specifying attributes) or the Studio, the tables follow "dynamic schema" behavior. Generally it is best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity and precise control over indexing, without dependency on the data itself. However, it can often be simpler and quicker to create a table and let the data generate the schema dynamically, with everything auto-indexed for broad querying.
+
+With dynamic schemas, individual attributes are reflexively created as data is ingested, meaning the table will adapt to the structure of the data ingested. Harper tracks the metadata around schemas, tables, and attributes, allowing for `describe_table`, `describe_schema`, and `describe_all` operations.
+
+### Databases
+
+Harper databases hold a collection of tables that are stored together in a single file and are transactionally connected. This means that operations across tables within a database can be performed in a single atomic transaction. By default tables are added to the default database called "data", but other databases can be created and specified for tables.
+
+### Tables
+
+Harper tables group records together with a common data pattern. To create a table, users must provide a table name and a primary key.
+
+* **Table Name**: Used to identify the table.
+* **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in the Harper operations API.
+
+## Primary Key
+
+The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary key; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [Harper Storage Algorithm](./storage-algorithm) utilizes this value for indexing.
+
+**Standard Attributes**
+
+With tables that are using dynamic schemas, additional attributes are reflexively added via insert and update operations (in both SQL and NoSQL) when new attributes are included in the data structure provided to Harper. As a result, schemas are additive, meaning new attributes are created in the underlying storage algorithm as additional data structures are provided. Harper offers `create_attribute` and `drop_attribute` operations for users who prefer to manually define their data model independent of data ingestion. When new attributes are added to tables with existing data, the value of that new attribute will be assumed to be `null` for all existing records.
+
+**Audit Attributes**
+
+Harper automatically creates two audit attributes on each record if the table is created without a schema.
+
+* `__createdtime__`: The time the record was created, in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+* `__updatedtime__`: The time the record was updated, in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+
+### Dynamic Schema Example
+
+To better understand the behavior, let’s take a look at an example. This example utilizes [Harper API operations](../../developers/operations-api/databases-and-tables).
+
+**Create a Database**
+
+```json
+{
+	"operation": "create_database",
+	"database": "dev"
+}
+```
+
+**Create a Table**
+
+Notice the database name, table name, and primary key name are the only required parameters.
+
+```json
+{
+	"operation": "create_table",
+	"database": "dev",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+At this point the table does not have structure beyond what we provided, so the table looks like this:
+
+**dev.dog**
+
+![](/img/v4.4/reference/dynamic_schema_2_create_table.png.webp)
+
+**Insert Record**
+
+To define attributes we do not need to do anything beyond sending them in with an insert operation.
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{"id": 1, "dog_name": "Penny", "owner_name": "Kyle"}
+	]
+}
+```
+
+With a single record inserted and new attributes defined, our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.4/reference/dynamic_schema_3_insert_record.png.webp)
+
+Indexes have been automatically created for the `dog_name` and `owner_name` attributes.
+
+**Insert Additional Record**
+
+If we continue inserting records with the same data schema, no schema updates are required. One record will omit the hash attribute from the insert to demonstrate GUID generation.
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{"id": 2, "dog_name": "Monk", "owner_name": "Aron"},
+		{"dog_name": "Harper", "owner_name": "Stephen"}
+	]
+}
```
+
+In this case, there is no change to the schema. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.4/reference/dynamic_schema_4_insert_additional_record.png.webp)
+
+**Update Existing Record**
+
+In this case, we will update a record with a new attribute not previously defined on the table.
+
+```json
+{
+	"operation": "update",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{"id": 2, "weight_lbs": 35}
+	]
+}
+```
+
+Now we have a new attribute called `weight_lbs`. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.4/reference/dynamic_schema_5_update_existing_record.png.webp)
+
+**Query Table with SQL**
+
+Now if we query for all records where `weight_lbs` is `null`, we expect to get back two records.
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT * FROM dev.dog WHERE weight_lbs IS NULL"
+}
+```
+
+This results in the expected two records being returned.
+
+![](/img/v4.4/reference/dynamic_schema_6_query_table_with_sql.png.webp)
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/globals.md b/site/versioned_docs/version-4.4/technical-details/reference/globals.md
new file mode 100644
index 00000000..e07b496b
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/globals.md
@@ -0,0 +1,252 @@
+---
+title: Globals
+---
+
+# Globals
+
+The primary way that JavaScript code can interact with Harper is through the global variables, which include several objects and classes that provide access to the tables, server hooks, and resources that Harper provides for building applications. As global variables, these can be directly accessed in any module.
+
+These global variables are also available through the `harperdb` module/package, which can provide better typing in TypeScript. To use this within your own project directory, make sure you link the package to your current `harperdb` installation:
+
+```bash
+npm link harperdb
+```
+
+The `harperdb` package is automatically linked for all installed components. Once linked, if you are using ECMAScript module syntax you can import functions from `harperdb` like:
+
+```javascript
+import { tables, Resource } from 'harperdb';
+```
+
+Or if you are using CommonJS format for your modules:
+
+```javascript
+const { tables, Resource } = require('harperdb');
+```
+
+The global variables include:
+
+## `tables`
+
+This is an object that holds all the tables for the default database (called `data`) as properties. Each of these property values is a table class that subclasses the Resource interface and provides access to the table through the Resource interface. For example, you can get a record from a table (in the default database) called 'my-table' with:
+
+```javascript
+import { tables } from 'harperdb';
+const { MyTable } = tables;
+async function getRecord() {
+	let record = await MyTable.get(recordId);
+}
+```
+
+It is recommended that you [define a database](../../getting-started) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object.
+
+## `databases`
+
+This is an object that holds all the databases in Harper, and can be used to explicitly access a table by database name. Each database will be a property on this object, and each of these property values will be an object with the set of all tables in that database. The default database, `databases.data`, should equal the `tables` export. For example, if you want to access the "dog" table in the "dev" database, you could do so like this:
+
+```javascript
+import { databases } from 'harperdb';
+const { Dog } = databases.dev;
+```
+
+## `Resource`
+
+This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](./resource) for more details about implementing a Resource class.
+
+## `auth(username, password?): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. If a password is provided, the password will be verified before returning the user object (if the password is incorrect, an error will be thrown).
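+
+As a minimal sketch (the surrounding function and credential handling are placeholders, assuming `auth` is importable like the other globals), verifying a username and password might look like:
+
+```javascript
+import { auth } from 'harperdb';
+
+async function verifyCredentials(username, password) {
+	try {
+		// Resolves with the user object, including its permission information.
+		return await auth(username, password);
+	} catch (error) {
+		// An incorrect password causes auth() to throw.
+		return null;
+	}
+}
+```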
+
+## `logger`
+
+This provides methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/standard-logging) for more information.
+
+## `server`
+
+The `server` global object provides a number of functions and objects to interact with Harper's HTTP service.
+
+### `server.http(listener: RequestListener, options: HttpOptions): HttpServer[]`
+
+Alias: `server.request`
+
+Add a handler method to the HTTP server request listener middleware chain.
+
+Returns an array of server instances based on the specified `options.port` and `options.securePort`.
+
+Example:
+
+```js
+server.http((request, next) => {
+	return request.url === '/graphql'
+		? handleGraphQLRequest(request)
+		: next(request);
+}, {
+	runFirst: true, // run this handler first
+});
+```
+
+#### `RequestListener`
+
+Type: `(request: Request, next: RequestListener) => Promise`
+
+The HTTP request listener to be added to the middleware chain. To continue chain execution, pass the `request` to the `next` function, such as `return next(request);`.
+
+### `Request` and `Response`
+
+The `Request` and `Response` classes are based on the WHATWG APIs for the [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) classes. Requests and responses are based on these standards-based APIs to facilitate reuse with modern web code. While Node.js' HTTP APIs are powerful low-level APIs, the `Request`/`Response` APIs provide excellent composability characteristics, well suited for layered middleware and for clean mapping to [RESTful method handlers](./resource) with promise-based responses, as well as interoperability with other standards-based APIs like [streams](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) used with [`Blob`s](https://developer.mozilla.org/en-US/docs/Web/API/Blob). However, the Harper implementation of these classes is not a direct implementation of the WHATWG APIs, but implements additional/distinct properties for the Harper server environment:
+
+#### `Request`
+A `Request` object is passed to the direct static REST handlers, is preserved as the context for instance methods, and has the following properties:
+- `url` - This is the request target, which is the portion of the URL that was received by the server. If a client sends a request to `http://example.com:8080/path?query=string`, the actual received request is `GET /path?query=string` and the `url` property will be `/path?query=string`.
+- `method` - This is the HTTP method of the request. This is a string like `GET`, `POST`, `PUT`, `DELETE`, etc.
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the request.
+- `pathname` - This is the path portion of the URL, without the query string. For example, if the URL is `/path?query=string`, the `pathname` will be `/path`.
+- `protocol` - This is the protocol of the request, like `http` or `https`.
+- `data` - This is the deserialized body of the request (based on the type of data specified by the `Content-Type` header).
+- `ip` - This is the remote IP address of the client that made the request (or the remote IP address of the last proxy to connect to Harper).
+- `host` - This is the host of the request, like `example.com`.
+- `sendEarlyHints(link: string, headers?: object): void` - This method sends an early hints response to the client, prior to actually returning a response. This is useful for sending a link header to the client to indicate that another resource should be preloaded. The `headers` argument can be used to send additional headers with the early hints response, in addition to the `link`.
+- `login(username, password): Promise` - This method can be called to start an authenticated session. The login will authenticate the user by username and password. If the authentication was successful, a session will be created and a cookie will be set on the response header that references the session. All subsequent requests from the client that send the cookie will be authenticated as the user that logged in, and the session record will be attached to the request. This method returns a promise that resolves when the login is successful, and rejects if the login is unsuccessful.
+- `session` - This is the session object that is associated with the current cookie-maintained session. This object is used to store session data for the current session. This is a `Table` record instance, and can be updated by calling `request.session.update({ key: value })`, or the session can be retrieved with `request.session.get()`. If the cookie has not been set yet, a cookie will be set the first time a session is updated or a login occurs.
+- `_nodeRequest` - This is the underlying Node.js [`http.IncomingMessage`](https://nodejs.org/api/http.html#http_class_http_incomingmessage) object. This can be used to access the raw request data, such as the raw headers, raw body, etc. However, this is discouraged and should be used with caution since it will likely break any other server handlers that depend on the layered `Request` call with `Response` return pattern.
+- `_nodeResponse` - This is the underlying Node.js [`http.ServerResponse`](https://nodejs.org/api/http.html#http_class_http_serverresponse) object. This can be used to access the raw response data, such as the raw headers. Again, this is discouraged and can cause problems for middleware, and should only be used if you are certain that other server handlers will not attempt to return a different `Response` object.
+
+#### `Response`
+
+REST methods can directly return data that is serialized and returned to users, or they can return a `Response` object (or a promise to a `Response`), or a `Response`-like object with the following properties (or again, a promise to it):
+- `status` - This is the HTTP status code of the response. This is a number like `200`, `404`, `500`, etc.
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the response.
+- `data` - This is the data to be returned in the response. This will be serialized using Harper's content negotiation.
+- `body` - Alternately (to `data`), the raw body can be returned as a `Buffer`, string, stream (Node.js or [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)), or a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
+
+#### `HttpOptions`
+
+Type: `Object`
+
+Properties:
+
+* `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+* `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+* `securePort` - _optional_ - `number` - Specify which HTTPS server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+#### `HttpServer`
+
+Node.js [`http.Server`](https://nodejs.org/api/http.html#class-httpserver) or [`https.SecureServer`](https://nodejs.org/api/https.html#class-httpsserver) instance.
+
+### `server.socket(listener: ConnectionListener, options: SocketOptions): SocketServer`
+
+Creates a socket server on the specified `options.port` or `options.securePort`.
+
+Only one socket server will be created. A `securePort` takes precedence.
+
+#### `ConnectionListener`
+
+Node.js socket server connection listener as documented in [`net.createServer`](https://nodejs.org/api/net.html#netcreateserveroptions-connectionlistener) or [`tls.createServer`](https://nodejs.org/api/tls.html#tlscreateserveroptions-secureconnectionlistener)
+
+#### `SocketOptions`
+
+* `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance.
+* `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+#### `SocketServer`
+
+Node.js [`net.Server`](https://nodejs.org/api/net.html#class-netserver) or [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+### `server.ws(listener: WsListener, options: WsOptions): HttpServer[]`
+
+Add a listener to the WebSocket connection listener middleware chain. The WebSocket server is associated with the HTTP server specified by the `options.port` or `options.securePort`. Use the [`server.upgrade()`](./globals#serverupgradelistener-upgradelistener-options-upgradeoptions-void) method to add a listener to the upgrade middleware chain.
+
+Example:
+
+```js
+server.ws((ws, request, chainCompletion) => {
+	chainCompletion.then(() => {
+		ws.on('error', console.error);
+
+		ws.on('message', function message(data) {
+			console.log('received: %s', data);
+		});
+
+		ws.send('something');
+	});
+});
+```
+
+#### `WsListener`
+
+Type: `(ws: WebSocket, request: Request, chainCompletion: ChainCompletion, next: WsListener): Promise`
+
+The WebSocket connection listener.
+
+* The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module.
+* The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server.
+* The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket.
+* The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one, such as: `next(ws, request, chainCompletion)`
+
+#### `WsOptions`
+
+Type: `Object`
+
+Properties:
+
+* `maxPayload` - _optional_ - `number` - Set the max payload size for the WebSocket server. Defaults to 100 MB.
+* `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+* `port` - _optional_ - `number` - Specify which WebSocket server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+* `securePort` - _optional_ - `number` - Specify which WebSocket secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.upgrade(listener: UpgradeListener, options: UpgradeOptions): void`
+
+Add a listener to the HTTP server [upgrade](https://nodejs.org/api/http.html#event-upgrade_1) event. If a WebSocket connection listener is added using [`server.ws()`](./globals#serverwslistener-wslistener-options-wsoptions-httpserver), a default upgrade handler will be added as well. The default upgrade handler will add a `__harperdb_request_upgraded` boolean to the `request` argument to signal that the connection has already been upgraded. It will also check for this boolean _before_ upgrading, and if it is `true`, it will pass the arguments along to the `next` listener.
+
+This method should be used to delegate HTTP upgrade events to an external WebSocket server instance.
+
+Example:
+
+> This example is from the Harper Next.js component. See the complete source code [here](https://github.com/HarperDB/nextjs/blob/main/extension.js)
+
+```js
+server.upgrade(
+	(request, socket, head, next) => {
+		if (request.url === '/_next/webpack-hmr') {
+			return upgradeHandler(request, socket, head).then(() => {
+				request.__harperdb_request_upgraded = true;
+
+				next(request, socket, head);
+			});
+		}
+
+		return next(request, socket, head);
+	},
+	{ runFirst: true }
+);
+```
+
+#### `UpgradeListener`
+
+Type: `(request, socket, head, next) => void`
+
+The arguments are passed to the middleware chain from the HTTP server [`'upgrade'`](https://nodejs.org/api/http.html#event-upgrade_1) event.
+
+#### `UpgradeOptions`
+
+Type: `Object`
+
+Properties:
+
+* `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+* `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+* `securePort` - _optional_ - `number` - Specify which HTTP secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.config`
+
+This provides access to the Harper configuration object. This comes from [harperdb-config.yaml](../../deployments/configuration) (parsed into object form).
+
+### `server.recordAnalytics(value, metric, path?, method?, type?)`
+
+This records the provided value as a metric in Harper's analytics. Harper efficiently records and tracks these metrics and makes them available through the [analytics API](./analytics). The values are aggregated, and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics; make sure you are not grouping at too fine a level for useful aggregation. The parameters are:
+
+* `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example.
+* `metric` - This is the name of the metric.
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/graphql.md b/site/versioned_docs/version-4.4/technical-details/reference/graphql.md new file mode 100644 index 00000000..0ae8eda3 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/reference/graphql.md @@ -0,0 +1,248 @@ +--- +title: GraphQL Querying +--- + +# GraphQL Querying + +Harper supports GraphQL in a variety of ways. It can be used for [defining schemas](../../developers/applications/defining-schemas), and for querying [Resources](./resource). + +Get started by setting `graphql: true` in `config.yaml`. + +This automatically enables a `/graphql` endpoint that can be used for GraphQL queries. + +> GraphQL querying maps GraphQL queries to exported resources, and is based on the [GraphQL Over HTTP / GraphQL specifications](https://graphql.github.io/graphql-over-http/draft/#) (it is designed to intuitively map queries to Harper resources, but does not implement the full [specification](https://spec.graphql.org/) of resolvers, subscribers, and mutations). + +Queries can either be `GET` or `POST` requests, and both follow essentially the same request format. `GET` requests must use search parameters, and `POST` requests use the request body. + +For example, to request the GraphQL Query: +```graphql +query GetDogs { + Dog { + id + name + } +} +``` + +The `GET` request would look like: + +```http +GET /graphql?query=query+GetDogs+%7B+Dog+%7B+id+name+%7D+%7D +Accept: application/graphql-response+json +``` + +And the `POST` request would look like: + +```http +POST /graphql/ +Content-Type: application/json +Accept: application/graphql-response+json + +{ + "query": "query GetDogs { Dog { id name } }" +} +``` + +> Tip: For the best user experience, include the `Accept: application/graphql-response+json` header in your request. This provides better status codes for errors. + +The Harper GraphQL querying system is strictly limited to exported Harper Resources. For many users, this will typically be a table that uses the `@export` directive in its schema. Queries can only specify Harper Resources and their attributes in the selection set. Queries can filter using [arguments](https://graphql.org/learn/queries/#arguments) on the top-level Resource field. Harper provides a short form pattern for simple queries, and a long form pattern based on the [Resource Query API](./resource#query) for more complex queries. + +Unlike REST queries, GraphQL queries can specify multiple resources simultaneously: + +```graphql +query GetDogsAndOwners { + Dog { + id + name + breed + } + + Owner { + id + name + occupation + } +} +``` + +This will return all dogs and owners in the database, and is equivalent to executing two REST queries: + +```http +GET /Dog/?select(id,name,breed) +# and +GET /Owner/?select(id,name,occupation) +``` + +### Request Parameters + +There are three request parameters for GraphQL queries: `query`, `operationName`, and `variables` + +1. `query` - _Required_ - The string representation of the GraphQL document. + 1. Limited to [Executable Definitions](https://spec.graphql.org/October2021/#executabledefinition) only. + 1. i.e. GraphQL [`query`](https://graphql.org/learn/queries/#fields) or `mutation` (coming soon) operations, and [fragments](https://graphql.org/learn/queries/#fragments). + 1. If a shorthand, unnamed, or single named query is provided, it will be executed by default. Otherwise, if there are multiple queries, the `operationName` parameter must be used. +1. `operationName` - _Optional_ - The name of the query operation to execute if multiple queries are provided in the `query` parameter +1. `variables` - _Optional_ - A map of variable values to be used for the specified query
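+ +For example, sending a named query with variables using `fetch` (a minimal sketch; the host, port, and values are illustrative): + +```js +const response = await fetch('http://localhost:9926/graphql', { +  method: 'POST', +  headers: { 'Content-Type': 'application/json', Accept: 'application/graphql-response+json' }, +  body: JSON.stringify({ +    query: 'query GetDog($id: ID!) { Dog(id: $id) { name breed } }', +    operationName: 'GetDog', // optional here, since the document contains a single query +    variables: { id: '0' }, +  }), +}); +const { data } = await response.json(); +```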
+ +### Type Checking + +The Harper GraphQL querying system takes many liberties from the GraphQL specification. This extends to how it handles type checking. In general, the querying system does **not** type check. Harper uses the `graphql` parser directly, and then performs a transformation on the resulting AST. We do not control any type checking/casting behavior of the parser, and since the execution step diverges greatly from the spec, the type checking behavior is only loosely defined. + +In variable definitions, the querying system will ensure non-null values exist (and error appropriately), but it will not do any type checking of the value itself. + +For example, the variable `$name: String!` states that `name` should be a non-null, string value. +- If the request does not contain the `name` variable, an error will be returned +- If the request provides `null` for the `name` variable, an error will be returned +- If the request provides any non-string value for the `name` variable, i.e. `1`, `true`, `{ foo: "bar" }`, the behavior is undefined and an error may or may not be returned. +- If the variable definition is changed to include a default value, `$name: String! = "John"`, then when omitted, `"John"` will be used. + - If `null` is provided as the variable value, an error will still be returned. + - If the default value does not match the type specified (i.e. `$name: String! = 0`), this is also considered undefined behavior. It may or may not fail in a variety of ways. +- Fragments will generally extend non-specified types, and the querying system will do no validity checking on them. For example, `fragment Fields on Any { ... }` is just as valid as `fragment Fields on MadeUpTypeName { ... }`. See the Fragments section for more details. + +The only notable place the querying system does some level of type analysis is the transformation of arguments into a query. +- Objects will be transformed into properly nested attributes +- String and Boolean values are passed through as their AST values +- Float and Int values will be parsed using the JavaScript `parseFloat` and `parseInt` methods respectively. +- Lists and Enums are not supported. + +### Fragments + +The querying system loosely supports fragments. Both fragment definitions and inline fragments are supported, and they are entirely a composition utility. Since this system does very little type checking, the `on Type` part of fragments is effectively meaningless. Any value can be used for `Type` and it will have the same effect. + +For example, in the query + +```graphql +query Get { + Dog { + ...DogFields + } +} + +fragment DogFields on Dog { + name + breed +} +``` + +The `Dog` type in the fragment has no correlation to the `Dog` resource in the query (that correlates to the Harper `Dog` resource). + +You can literally specify anything in the fragment and it will behave the same way: + +```graphql +fragment DogFields on Any { ... 
} # this is recommended +fragment DogFields on Cat { ... } +fragment DogFields on Animal { ... } +fragment DogFields on LiterallyAnything { ... } +``` + +As an actual example, fragments should be used for composition: + +```graphql +query Get { + Dog { + ...sharedFields + breed + } + Owner { + ...sharedFields + occupation + } +} + +fragment sharedFields on Any { + id + name +} +``` + +### Short Form Querying + +Any attribute can be used as an argument for a query. In this short form, multiple arguments are treated as multiple equivalency conditions with the default `and` operation. + +For example, the following query requires an `id` variable to be provided, and the system will search for a `Dog` record matching that id. + +```graphql +query GetDog($id: ID!) { + Dog(id: $id) { + name + breed + owner { + name + } + } +} +``` + +And as a properly formed request: +```http +POST /graphql/ +Content-Type: application/json +Accept: application/graphql-response+json + +{ + "query": "query GetDog($id: ID!) { Dog(id: $id) { name breed owner { name } } }", + "variables": { + "id": "0" + } +} +``` + +The REST equivalent would be: +```http +GET /Dog/?id==0&select(name,breed,owner{name}) +# or +GET /Dog/0?select(name,breed,owner{name}) +``` + +Short form queries can handle nested attributes as well. + +For example, return all dogs who have an owner with the name `"John"`: + +```graphql +query GetDog { + Dog(owner: { name: "John" }) { + name + breed + owner { + name + } + } +} +``` + +Would be equivalent to +```http +GET /Dog/?owner.name==John&select(name,breed,owner{name}) +``` + +And finally, we can put all of these together to create semi-complex, equality-based queries! + +The following query has two variables and will return all dogs who have the specified name as well as the specified owner name. + +```graphql +query GetDog($dogName: String!, $ownerName: String!) { + Dog(name: $dogName, owner: { name: $ownerName }) { + name + breed + owner { + name + } + } +} +``` + +### Long Form Querying + +> Coming soon! + +### Mutations + +> Coming soon! + +### Subscriptions + +> Coming soon! + +### Directives + +> Coming soon! \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/reference/headers.md b/site/versioned_docs/version-4.4/technical-details/reference/headers.md new file mode 100644 index 00000000..0301b152 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/reference/headers.md @@ -0,0 +1,12 @@ +--- +title: Harper Headers +--- + +# Harper Headers + +All Harper API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all Harper API responses: + +| Key | Example Value | Description | +|-------------------|------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------| +| server-timing | db;dur=7.165 | This reports the duration of the operation, in milliseconds. This follows the Server-Timing standard and can be consumed by network monitoring tools. | +| content-type | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header. 
| diff --git a/site/versioned_docs/version-4.4/technical-details/reference/index.md b/site/versioned_docs/version-4.4/technical-details/reference/index.md new file mode 100644 index 00000000..8b2629e5 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/reference/index.md @@ -0,0 +1,16 @@ +--- +title: Reference +--- + +# Reference + +This section contains technical details and reference materials for Harper. + +* [Resource API](./resource) +* [Transactions](./transactions) +* [Storage Algorithm](./storage-algorithm) +* [Dynamic Schema](./dynamic-schema) +* [Headers](./headers) +* [Limitations](./limits) +* Content Types +* [Data Types](./data-types) diff --git a/site/versioned_docs/version-4.4/technical-details/reference/limits.md b/site/versioned_docs/version-4.4/technical-details/reference/limits.md new file mode 100644 index 00000000..9e343887 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/reference/limits.md @@ -0,0 +1,36 @@ +--- +title: Harper Limits +--- + +# Harper Limits + +This document outlines limitations of Harper. + +## Database Naming Restrictions + +**Case Sensitivity** + +Harper database metadata (database names, table names, and attribute/column names) is case sensitive, meaning database, table, and attribute names can differ only by the case of their characters. + +**Restrictions on Database Metadata Names** + +Harper database metadata (database names, table names, and attribute names) cannot contain the following UTF-8 characters: + +``` +/`¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ +``` + +Additionally, they cannot contain the first 31 non-printing characters. Spaces are allowed, but not recommended as best practice. The regular expression used to verify a name is valid is: + +``` +^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$ +```
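+ +For example, a quick validity check against this pattern (a minimal sketch): + +```javascript +const VALID_NAME = /^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$/; +VALID_NAME.test('my_table'); // true +VALID_NAME.test('naïve'); // false (contains a disallowed character) +```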
+ +## Table Limitations + +**Attribute Maximum** + +Harper limits the number of total indexed attributes across tables (including the primary key of each table) to 10,000 per database. + +## Primary Keys +The maximum length of a primary key is 1978 bytes or 659 characters (whichever is shorter). \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/reference/resource.md b/site/versioned_docs/version-4.4/technical-details/reference/resource.md new file mode 100644 index 00000000..4dd67bcd --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/reference/resource.md @@ -0,0 +1,724 @@ +--- +title: Resource Class +--- + +# Resource Class + +## Resource Class + +The Resource class is designed to provide a unified API for modeling different data resources within Harper. Database/table data can be accessed through the Resource API. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users. + +Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, that you can interact with through various methods or queries. Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. A Resource instance holds the primary key/identifier, context information, and any pending updates to the record, so any instance method can act on the record with full access to this information during execution. Therefore, there are distinct resource instances created for every record or query that is accessed, and the instance methods are used for interaction with the data. + +Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. The general rule for how to interact with resources: + +* If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`. + * You can subsequently use the instance methods on the returned resource instance to perform additional actions on the record. +* If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods. + +The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to and be used in a similar way to how you would interact with a RESTful API. + +The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs, which were designed with single-server interactions in mind; semantics that attempt to guarantee create-only (no existing record) or overwrite-only behavior require locks that don't scale well in a distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed, eventually consistent database. You can generally think of CRUD operations mapping to REST operations like this: + +* Read - `get` +* Create with a known primary key - `put` +* Create with a generated primary key - `post`/`create` +* Update (Full) - `put` +* Update (Partial) - `patch` +* Delete - `delete` + +The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URL, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped to a Resource instance where the instance's ID will be `3492`, and interactions will use the instance methods like `get()`, `put()`, and `post()`. Using the root path (`/MyResource/`) will map to a Resource instance with an ID of `null`, and this represents the collection of all the records in the resource or table. + +You can create classes that extend `Resource` to define your own data sources, typically to interface with external data sources (the `Resource` base class is available as a global variable in the Harper JS environment). In doing this, you will generally be extending and providing implementations for the instance methods below. 
For example: + +```javascript +export class MyExternalData extends Resource { + async get() { + // fetch data from an external source, using our id + let response = await this.fetch(this.id); + // do something with the response + } + put(data) { + // send the data into the external source + } + delete() { + // delete an entity in the external data source + } + subscribe(options) { + // if the external data source is capable of real-time notification of changes, can subscribe + } +} +// we can export this class from resources.js as our own endpoint, or use this as the source for +// a Harper table to store and cache the data coming from this data source: +tables.MyCache.sourcedFrom(MyExternalData); +``` + +You can also extend table classes in the same way, overriding the instance methods for custom functionality. The `tables` object is a global variable in the Harper JavaScript environment, along with `Resource`: + +```javascript +export class MyTable extends tables.MyTable { + get() { + // we can add properties or change properties before returning data: + this.newProperty = 'newValue'; + this.existingProperty = 44; + return super.get(); // returns the record, modified with the changes above + } + put(data) { + // can change data any way we want + super.put(data); + } + delete() { + super.delete(); + } + post(data) { + // providing a post handler (for HTTP POST requests) is a common way to create additional + // actions that aren't well described with just PUT or DELETE + } +} +``` + +Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice. + +All Resource methods that are called from HTTP methods may directly return data, or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code) to explicitly return specific headers and a status code. + +## Global Variables + +### `tables` + +This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API. + +### `databases` + +This is an object with all the databases that have been defined in Harper (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.
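+ +For example (a minimal sketch, assuming a `Dog` table in the default database): + +```javascript +const { Dog } = tables; +// "data" is the default database, so both references resolve to the same table class +console.log(databases.data.Dog === Dog); // true +```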
+ +### `Resource` + +This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables. + +### `server` + +This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../../developers/components/reference#extensions). + +### `transaction` + +This provides a function for starting transactions. See the transactions section below for more information. + +### `contentTypes` + +This provides an interface for defining new content type handlers. See the content type extensions documentation for more information. + +### TypeScript Support + +While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them. This can be done by setting up a package link to the main Harper package in your app: + +``` +# you may need to go to your harper directory and set it up as a link first +npm link harperdb +``` + +And then you can import any of the main Harper APIs you will use, and your IDE should understand the full typings associated with them: + +``` +import { databases, tables, Resource } from 'harperdb'; +``` + +## Resource Class (Instance) Methods + +### Properties/attributes declared in schema + +Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances. + +### `get(queryOrProperty?)`: Resource|AsyncIterable + +This is called to return the record or data for this resource, and is called by HTTP GET requests. This may optionally be called with a `query` object to specify that a query should be performed, or a string to indicate that the specified property value should be returned. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. The default `get` method (`super.get()`) returns the current record as a plain object. + +The query object can be used to access any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information: + +```javascript +get(query) { + // note that query will only exist (as an object) if there is a query string + let param1 = query?.get?.('param1'); // returns 'value' + let id = this.getId(); // returns 'some-id' + ... +} +``` + +If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return `this` instance of the resource. If `get` is called on a collection (`/Table/?name=value`), the default action is to `search` and return an AsyncIterable of results. + +It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.get(query)` performs a `get` on this specific record/resource, not on the whole table. If you wish to access a _different_ record, you should use the static `get` method on the table class, like `Table.get(otherId, context)`. + +### `search(query: Query)`: AsyncIterable + +- Arguments + - `query`: The [Query](#query) object to use for the search +- Return value + - An [AsyncIterable](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/AsyncIterator) of records that match the query + +This performs a query on this resource, searching for records that are descendants. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`), it searches through all records in the table. However, if you call search from an instance with a specific ID like `1` from a path like `Table/1`, it will only return records that are descendants of that record, like `[1, 1]` (path of Table/1/1) and `[1, 2]` (path of Table/1/2). If you want to do a standard search of the table, make sure you call the static method like `Table.search(...)`. You can define or override this method to define how records should be queried. 
The default `search` method on tables (`super.search(query)`) will perform a query and return an AsyncIterable of results. The query object can be used to specify the desired query. + +### `getId(): string|number|Array` + +Returns the primary key value for this resource. + +### `put(data: object, query?: Query): Resource|void|Response` + +This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance. + +It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.put(data)` updates this specific record/resource, not other records in the table. If you wish to update a _different_ record, you should use the static `put` method on the table class, like `Table.put(data, context)`. + +The `query` argument is used to represent any additional query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information: + +```javascript +put(data, query) { + let param1 = query?.get?.('param1'); // returns 'value' + ... +} +``` + +### `patch(data: object): Resource|void|Response` + +### `patch(data: object, query?: Query)` + +This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `query` argument is used to represent any additional query parameters that were included. + +### `update(data: object, fullUpdate: boolean?)` + +This is called by the default `put` and `patch` handlers to update a record. `put` calls it with `fullUpdate` as `true` to indicate a full record replacement (`patch` calls it with the second argument as `false`). Any additional property changes that are made before the transaction commits will also be persisted. + +### `delete(queryOrProperty?): Resource|void|Response` + +This will delete this record or resource, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete()`) deletes the record from the table as part of the current transaction. + +### `publish(message): Resource|void|Response` + +This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic. + +### `post(data: object, query?: Query): Resource|void|Response` + +This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally, `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `query` argument is used to represent any additional query parameters that were included.
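+ +For example (a minimal sketch; the action name and `adopted` attribute are illustrative): + +```javascript +export class Dog extends tables.Dog { +  post(data, query) { +    if (data?.action === 'adopt') { +      // a custom action instead of the default record creation +      this.adopted = true; +      return { status: 'adopted' }; +    } +    return super.post(data, query); // default behavior: create a new record +  } +} +```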
+ +### `invalidate()` + +This method is available on tables. This will invalidate the current record in the table. This can be used with a caching table and is used to indicate that the source data has changed, and the record needs to be reloaded when next accessed. + +### `subscribe(subscriptionRequest: SubscriptionRequest): Promise` + +This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource. + +The returned (promise resolves to) Subscription object is an `AsyncIterable` that you can use a `for await` to iterate through. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time, previous count, or there is a message for the current or "retained" record, these may be immediately returned). + +The `SubscriptionRequest` object supports the following properties (all optional): + +* `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`). +* `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription. +* `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`. +* `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.
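+ +For example, iterating over a subscription (a minimal sketch, assuming a `Dog` table): + +```javascript +const subscription = await tables.Dog.subscribe({ includeDescendants: true }); +for await (const event of subscription) { +  // called for every update or published message on Dog records +  console.log('update received', event); +} +```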
+ +### `connect(incomingMessages?: AsyncIterable, query?: Query): AsyncIterable` + +This is called when a connection is received through WebSockets or Server-Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client. + +### `set(property, value)` + +This will assign the provided value to the designated property in the resource's record. During a write operation, this will indicate that the record has changed and the changes will be saved during commit. During a read operation, this will modify the copy of the record that will be serialized (converted to the output format of JSON, MessagePack, etc.). + +### `allowCreate(user: any, data: Promise, context: Context): boolean | Promise` + +This is called to determine if the user has permission to create the current resource. This is called as part of external incoming requests (HTTP). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's insert permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean, and may await the `data` promise to determine if the data is valid for creation. + +### `allowRead(user: any, query: Map | void, context: Context): boolean | Promise` + +This is called to determine if the user has permission to read from the current resource. This is called as part of external incoming requests (HTTP GET). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's read permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean. + +### `allowUpdate(user: any, data: Promise, context: Context): boolean | Promise` + +This is called to determine if the user has permission to update the current resource. This is called as part of external incoming requests (HTTP PUT). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's update permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean, and may await the `data` promise to determine if the data is valid for the update. + +### `allowDelete(user: any, query: Map | void, context: Context): boolean | Promise` + +This is called to determine if the user has permission to delete the current resource. This is called as part of external incoming requests (HTTP DELETE). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's delete permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean. + +### `addTo(property, value)` + +This adds the provided value to the specified property using conflict-free replicated data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values. + +### `getUpdatedTime(): number` + +This returns the last updated time of the resource (timestamp of last commit). This is returned as milliseconds from epoch. + +### `wasLoadedFromSource(): boolean` + +Indicates if the record had been loaded from source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or waiting on an inflight request from the source to finish). + +### `getContext(): Context` + +Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action. + +#### `Context` + +The `Context` object has the following (potential) properties: + +* `user` - This is the user object, which includes information about the username, role, and authorizations.
+* `transaction` - The current transaction. + +If the current method was triggered by an HTTP request, the following properties are also available: + +* `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed, and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated. + +When a resource gets a request through HTTP, the request object is the context, which has the following properties: + +* `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string). +* `method` - The method of the HTTP request. +* `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`. +* `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`. +* `pathname` - This provides the path part of the URL (no querystring). +* `host` - This provides the host name of the request (from the `Host` header). +* `ip` - This provides the IP address of the client that made the request. +* `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body. +* `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body like `DELETE`). + +When a resource is accessed as a data source: + +* `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and this first request's context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information. + +### `operation(operationObject: Object, authorize?: boolean): Promise` + +This method is available on tables and will execute a Harper operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../developers/operations-api/) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, an error will be thrown if they are not authorized). + +### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean` + +For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. 
The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value. + +## Resource Static Methods and Properties + +The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way of interacting with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on. + +The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a `static search()` method for specifically handling searching a table with query parameters. By default, the Resource static methods create an instance bound to the record specified by the arguments and call the corresponding instance methods. Again, static methods are generally the preferred way to interact with resources from application code. These methods are available on all user Resource classes and tables. + +### `get(id: Id, context?: Resource|Context)` + +This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do: + +```javascript +const { MyTable, Comment } = tables; +... +// in class: + async get() { + for (let commentId of this.commentIds) { + let comment = await Comment.get(commentId, this); + // now you can do something with the comment record + } + } +``` + +Type definition for `Id`: + +```typescript +Id = string|number|array +``` + +### `get(query: Query, context?: Resource|Context)` + +This can be used to retrieve a resource instance by a query. The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`: + +```javascript +MyTable.get({ id: 34, select: ['name', 'age'] }); +``` + +This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above. + +### `put(id: Id, record: object, context?: Resource|Context): Promise` + +This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same `id` (primary key). + +### `put(record: object, context?: Resource|Context): Promise` + +This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same primary key provided in the record. If your table doesn't have a primary key attribute, you will need to use the method with the `id` argument. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
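+ +For example (a minimal sketch, assuming a `Dog` table): + +```javascript +// full record replacement by explicit primary key +await Dog.put(42, { name: 'Rex', breed: 'Collie' }); +// or with the primary key included in the record itself +await Dog.put({ id: 42, name: 'Rex', breed: 'Collie' }); +```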
+ +### `create(record: object, context?: Resource|Context): Promise` + +This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not include a primary key, and the generated primary key will be used. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. + +### `post(id: Id, data: object, context?: Resource|Context): Promise` + +### `post(data: object, context?: Resource|Context): Promise` + +This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged. + +### `patch(recordUpdate: object, context?: Resource|Context): Promise` + +### `patch(id: Id, recordUpdate: object, context?: Resource|Context): Promise` + +This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. + +### `delete(id: Id, context?: Resource|Context): Promise` + +Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. + +### `publish(message: object, context?: Resource|Context): Promise` + +### `publish(topic: Id, message: object, context?: Resource|Context): Promise` + +Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction. + +### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise` + +Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this. + +### `search(query: Query, context?: Resource|Context): AsyncIterable` + +This will perform a query on this table or collection. The query parameter can be used to specify the desired query. + +### `setComputedAttribute(name: string, computeFunction: (record: object) => any)` + +This will define the function to use for a computed attribute. To use this, the attribute must be defined in the schema as a computed attribute. The `computeFunction` will be called with the record as an argument and should return the computed value for the attribute. For example: + +```javascript +MyTable.setComputedAttribute('computedAttribute', (record) => { + return record.attribute1 + record.attribute2; +}); +``` + +For a schema like: + +```graphql +type MyTable @table { + id: ID @primaryKey + attribute1: Int + attribute2: Int + computedAttribute: Int @computed +} +``` + +See the [schema documentation](../../developers/applications/defining-schemas) for more information on computed attributes. + +### `primaryKey` + +This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example: + +```javascript +let record34 = await Table.get(34); +record34[Table.primaryKey]; // -> 34 +``` + +There are additional methods that are only available on table classes (which are a type of resource). + +### `Table.sourcedFrom(Resource, options)` + +This defines the source for a table. This allows a table to function as a cache for an external resource. 
When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`), and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can configure a time-to-live expiration window for automatic deletion or invalidation of older entries; it supports: + +* `expiration` - Default expiration time for records in seconds. +* `eviction` - Eviction time for records in seconds. +* `scanInterval` - Time period for scanning the table for records to evict. + +If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data).
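+ +For example (a minimal sketch, reusing the `MyExternalData` class from earlier; the timing values are illustrative): + +```javascript +tables.MyCache.sourcedFrom(MyExternalData, { +  expiration: 3600, // entries become stale after an hour +  eviction: 86400, // evict entries that have not been accessed for a day +  scanInterval: 3600, // sweep for evictable entries every hour +}); +```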
+ +### `parsePath(path, context, query)` + +This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will parse `.property` suffixes for accessing properties and specifying the preferred content type in the URL (and for older tables it will convert a multi-segment path to a multipart array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simple path-to-id preservation: + +```javascript + static parsePath(path) { + return path; // return the path as the id + } +``` + +### `isCollection(resource: Resource): boolean` + +This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity. + +### Context and Transactions + +Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation. + +This also allows timestamps that are accessed during resolution to be used to determine the overall last updated timestamp, which informs the header timestamps (which facilitates accurate client-side caching). The context also maintains user, session, and request metadata information that is communicated so that contextual request information (like headers) can be accessed, any writes are properly attributed to the correct user, and any additional security checks can be applied to the user. + +When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the source argument (second argument) to the static methods. + +For example, suppose we have a method to post a comment on a blog, and when this happens we also want to update an array of comment IDs on the blog record and add the comment to a separate comment table. We might do this: + +```javascript +const { Comment } = tables; + +export class BlogPost extends tables.BlogPost { + post(comment) { + // add a comment record to the comment table, using this resource as the source for the context + Comment.put(comment, this); + this.comments.push(comment.id); // add the id for the record to our array of comment ids + // Both of these actions will be committed atomically as part of the same transaction + } +} +``` + +Please see the [transaction documentation](./transactions) for more information on how transactions work in Harper. + +### Query + +The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional: + +#### `conditions` + +This is an array of objects that specify the conditions used to match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties: + +* `attribute`: Name of the property/attribute to match on. +* `value`: The value to match. +* `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater\_than", "greater\_than\_equal", "less\_than", "less\_than\_equal", "starts\_with", "contains", "ends\_with", "between", and "not\_equal". +* `conditions`: An array of conditions, which follows the same structure as above. +* `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`; this is optional and defaults to `and`). + +For example, a more complex query might look like: + +```javascript +Table.search({ conditions: [ + { attribute: 'price', comparator: 'less_than', value: 100 }, + { operator: 'or', conditions: [ + { attribute: 'rating', comparator: 'greater_than', value: 4 }, + { attribute: 'featured', value: true } + ]} +]}); +``` + +**Chained Attributes/Properties** + +Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../developers/applications/defining-schemas), see the [REST documentation](../../developers/rest) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name: + +```javascript +Product.search({ conditions: [ + { attribute: ['brand', 'name'], value: 'Harper' } +]}); +``` + +This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as in nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in referenced/joined tables. + +#### `operator` + +Specifies if the conditions should be applied as an `"and"` (records must match all conditions), or as an `"or"` (records must match at least one condition). This is optional and defaults to `"and"`.
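+ +For example, a top-level `or` (a minimal sketch): + +```javascript +Table.search({ +  operator: 'or', +  conditions: [ +    { attribute: 'featured', value: true }, +    { attribute: 'rating', comparator: 'greater_than', value: 4 } +  ] +}); +```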
+ +#### `limit` + +This specifies the limit of the number of records that should be returned from the query. + +#### `offset` + +This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records. + +#### `select` + +This specifies the specific properties that should be included in each record that is returned. This can be an array, to specify a set of properties that should be included in the returned objects. The array can also have an `asArray` property (`select.asArray = true`), in which case the query results will return a set of arrays of values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and a `select` array itself that specifies the properties that should be returned by the referenced sub-object or related record. For example, a `select` can be defined: + +```javascript +Table.search({ select: [ 'name', 'age' ], conditions: ...}) +``` + +Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects: + +```javascript +Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...}) +``` + +The select properties can also include certain special properties: + +* `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table). +* `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time). + +Alternatively, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects: + +```javascript +Table.search({ select: 'id', conditions: ...}) +``` + +#### `sort` + +This defines the sort order, and should be an object that can have the following properties: + +* `attribute`: The attribute to sort on. +* `descending`: If true, will sort in descending order (optional and defaults to `false`). +* `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`. + +#### `explain` + +This will return the conditions re-ordered as Harper will execute them. Harper will estimate the number of matching records for each condition and execute the narrowest condition first. + +#### `enforceExecutionOrder` + +This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them. + +The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index). 
+ +For example, we could do a query like: + +```javascript +let { Product } = tables; +let results = Product.search({ + conditions: [ + { attribute: 'rating', value: 4.5, comparator: 'greater_than' }, + { attribute: 'price', value: 100, comparator: 'less_than' }, + ], + offset: 20, + limit: 10, + select: ['id', 'name', 'price', 'rating'], + sort: { attribute: 'price' } +}) +for await (let record of results) { + // iterate through each record in the query results +} +``` + +`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will leave a long-lived read transaction open, which can degrade performance (including write performance), and the transaction may eventually be aborted. + +### Interacting with the Resource Data Model + +When extending or interacting with table resources, when a resource instance is retrieved and instantiated, it will be loaded with the record data from its table. You can interact with this record through the resource instance. For any properties that have been defined in the table's schema, you can directly access or modify them through standard property syntax. For example, let's say we defined a product schema: + +```graphql +type Product @table { + id: ID @primaryKey + name: String + rating: Int + price: Float +} +``` + +If we have extended this table class with our own `get()`, we can interact with any of these specified attributes/properties: + +```javascript +export class CustomProduct extends Product { + get(query) { + let name = this.name; // this is the name of the current product + let rating = this.rating; // this is the rating of the current product + this.rating = 3; // we can also modify the rating for the current instance + // (with a get this won't be saved by default, but will be used when serialized) + return super.get(query); + } +} +``` + +Likewise, we can interact with resource instances in the same way when retrieving them through the static methods: + +```javascript +let product1 = await Product.get(1); +let name = product1.name; // this is the name of the product with a primary key of 1 +let rating = product1.rating; // this is the rating of the product with a primary key of 1 +product1.rating = 3; // modify the rating for this instance (see update() below for persisting changes) + +``` + +If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definitions for direct property access; instead, we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method: + +```javascript +let product1 = await Product.get(1); +let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema +product1.set('newProperty', 'some value'); // we can assign any properties we want with set +``` + +And likewise, we can do this in an instance method, although you will probably want to use super.get()/set() so you don't have to write extra logic to avoid recursion: + +```javascript +export class CustomProduct extends Product { + get(query) { + let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value 
+
+### Interacting with the Resource Data Model
+
+When extending or interacting with table resources, a resource instance that is retrieved and instantiated will be loaded with the record data from its table. You can interact with this record through the resource instance. For any properties that have been defined in the table's schema, you can directly access or modify them through standard property syntax. For example, let's say we defined a product schema:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+}
+```
+
+If we have extended this table class with our own `get()`, we can interact with any of these specified attributes/properties:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let name = this.name; // this is the name of the current product
+		let rating = this.rating; // this is the rating of the current product
+		this.rating = 3; // we can also modify the rating for the current instance
+		// (with a get this won't be saved by default, but will be used when serialized)
+		return super.get(query);
+	}
+}
+```
+
+Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:
+
+```javascript
+let product1 = await Product.get(1);
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+product1.rating = 3; // modify the rating for this instance (this will not be saved without a call to update())
+```
+
+If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definitions for direct property access. Instead, we can access properties with the `get(propertyName)` method and modify them with the `set(propertyName, value)` method:
+
+```javascript
+let product1 = await Product.get(1);
+let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+product1.set('newProperty', 'some value'); // we can assign any properties we want with set
+```
+
+And likewise, we can do this in an instance method, although you will probably want to use `super.get()`/`super.set()` so you don't have to write extra logic to avoid recursion:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+		super.set('newProperty', 'some value'); // we can assign any properties we want with set
+	}
+}
+```
+
+Note that you may also need to use `get`/`set` for properties that conflict with existing method names. For example, if your schema defines an attribute called `getId` (not recommended), you would need to access that property through `get('getId')` and `set('getId', value)`.
+
+If you want to save the changes you make, you can call the `update()` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.rating = 3;
+product1.set('newProperty', 'some value');
+product1.update(); // save both of these property changes
+```
+
+Updates are automatically saved inside modifying methods like `put` and `post`:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		this.name = data.name;
+		this.set('description', data.description);
+		// both of these changes will be saved automatically as this transaction commits
+	}
+}
+```
+
+We can also interact with properties in nested objects and arrays, following the same patterns. For example, we could define more complex types on our product:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+	brand: Brand
+	variations: [Variation]
+}
+type Brand {
+	name: String
+}
+type Variation {
+	name: String
+	price: Float
+}
+```
+
+We can interact with these nested properties:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		let brandName = this.brand.name;
+		let firstVariationPrice = this.variations[0].price;
+		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but can still try to access property
+		// make some changes
+		this.variations.splice(0, 1); // remove first variation
+		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		this.brand.name = 'new brand name';
+		// all of these changes will be saved
+	}
+}
+```
+
+If you need to delete a property, you can do so with the `delete` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.delete('additionalInformation');
+product1.update();
+```
+
+You can also get a "plain" object representation of a resource instance by calling `toJSON`, which will return a simple frozen object with all the properties (whether or not they are defined in the schema) as direct normal properties (note that this object can _not_ be modified; it is frozen since it belongs to a cache):
+
+```javascript
+let product1 = await Product.get(1);
+let plainObject = product1.toJSON();
+for (let key in plainObject) {
+	// can iterate through the properties of this record
+}
+```
+
+## Response Object
+
+The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response` style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. For example, you could return a redirect response like:
+
+```javascript
+return { status: 302, headers: { Location: '/new-location' } };
+```
+
+If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header:
+
+```javascript
+return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
+```
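+
+Similarly, a small sketch of the `body` variant (the header and body values here are arbitrary): since `body` is returned as-is with no content negotiation, it is useful when you have already serialized the content yourself:
+
+```javascript
+// `body` must be a string or buffer and is returned verbatim as the response body
+return { status: 200, headers: { 'Content-Type': 'text/plain' }, body: 'Hello, World!' };
+```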
+
+### Throwing Errors
+
+You may throw errors (and leave them uncaught) from the resource methods, and these should be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default, the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned. For example:
+
+```javascript
+if (notAuthorized()) {
+	let error = new Error('You are not authorized to access this');
+	error.statusCode = 403;
+	throw error;
+}
+```
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/storage-algorithm.md b/site/versioned_docs/version-4.4/technical-details/reference/storage-algorithm.md
new file mode 100644
index 00000000..d936f1a5
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/storage-algorithm.md
@@ -0,0 +1,27 @@
+---
+title: Storage Algorithm
+---
+
+# Storage Algorithm
+
+The Harper storage algorithm is fundamental to the Harper core functionality, enabling the [Dynamic Schema](./dynamic-schema) and all other user-facing functionality. Harper is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document provides additional details on how data is stored within Harper.
+
+## Query Language Agnostic
+
+The Harper storage algorithm was designed to abstract the data storage from any individual query language. Harper currently supports both SQL and NoSQL on top of this storage algorithm, with the ability to add additional query languages in the future. This means data can be inserted via NoSQL and read via SQL while hitting the same underlying data storage.
+
+## ACID Compliant
+
+Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, Harper offers ACID compliance independently on each node. Readers and writers operate independently of each other, meaning readers don't block writers and writers don't block readers. Each Harper table has a single writer process, avoiding deadlocks and assuring that writes are executed in the order in which they were received. Harper tables can have multiple reader processes operating at the same time for consistent, high-scale reads.
+
+## Universally Indexed
+
+All top-level attributes are automatically indexed immediately upon ingestion. The [Harper Dynamic Schema](./dynamic-schema) reflexively creates both the attribute and its index as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
+
+## Additional LMDB Benefits
+
+Harper inherits both functional and performance benefits by implementing LMDB as the underlying key-value store. Data is memory-mapped, which enables quick data access without data duplication. All writers are fully serialized, making writes deadlock-free. LMDB is built to maximize operating system features and functionality, fully exploiting buffer cache and built to run in CPU cache. To learn more about LMDB, visit their documentation.
+
+## Harper Indexing Example (Single Table)
+
+![](/img/v4.4/reference/HarperDB-3.0-Storage-Algorithm.png.webp)
diff --git a/site/versioned_docs/version-4.4/technical-details/reference/transactions.md b/site/versioned_docs/version-4.4/technical-details/reference/transactions.md
new file mode 100644
index 00000000..8c712122
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/reference/transactions.md
@@ -0,0 +1,40 @@
+---
+title: Transactions
+---
+
+# Transactions
+
+Transactions are an important part of robust handling of data in data-driven applications. Harper provides ACID-compliant support for transactions, allowing for guaranteed atomic, consistent, and isolated data handling within transactions, with durability guarantees on commit. Understanding how transactions are tracked and behave is important for properly leveraging transactional support in Harper. For most operations this is very intuitive: each HTTP request is executed in a transaction, so when multiple actions are executed in a single request, they are normally automatically included in the same transaction.
+
+Transactions span a database. Once a read snapshot is started, it is an atomic snapshot of all the tables in a database, and writes that span multiple tables in the database will all be committed atomically together (no writes in one table will be visible before writes in another table in the same database). If a transaction is used to access or write data in multiple databases, there will actually be a separate database transaction used for each database, and there is no guarantee of atomicity between separate transactions in separate databases. This can be an important consideration when deciding if and how tables should be organized into different databases.
+
+Because Harper is designed to be a low-latency distributed database, locks are avoided in data handling; consequently, transactions do not lock data within the transaction. When a transaction starts, it will provide a read snapshot of the database for any retrievals or queries, which means all reads will be performed on a single version of the database, isolated from any other writes that are concurrently taking place. Within a transaction, all writes are aggregated and atomically written on commit. These writes are all isolated (from other transactions) until committed, and all become visible atomically. However, because transactions are non-locking, it is possible that writes from other transactions may occur between when reads are performed and when the writes are committed (at which point the last write will win for any records that have been written concurrently). Support for locks in transactions is planned for a future release.
+
+Transactions can also be explicitly started using the `transaction` global function that is provided in the Harper environment:
+
+## `transaction(context?, callback: (transaction) => any): Promise`
+
+This executes the callback in a transaction, providing a context that can be used for any resource methods that are called. This returns a promise for when the transaction has been committed. The callback itself may be asynchronous (return a promise), allowing for asynchronous activity within the transaction. This is useful for starting a transaction when your code is not already running within a transaction (in an HTTP request handler, a transaction will typically already be started). For example, if we wanted to run an action on a timer that periodically loads data, we could ensure that the data is loaded in a single transaction like this (note that HDB is multi-threaded, and if we do a timer-based job, we very likely want it to only run in one thread):
+
+```javascript
+import { tables } from 'harperdb';
+import { isMainThread } from 'worker_threads'; // isMainThread comes from Node's worker_threads module
+const { MyTable } = tables;
+if (isMainThread) // only on main thread
+	setInterval(async () => {
+		let someData = await (await fetch(... some URL ...)).json();
+		transaction((txn) => {
+			for (let item of someData) {
+				MyTable.put(item, txn);
+			}
+		});
+	}, 3600000); // every hour
+```
+
+You can provide your own context object for the transaction to attach to. If you call `transaction` with a context that already has a transaction started, it will simply use the current transaction, execute the callback, and immediately return (this can be useful for ensuring that a transaction has started).
+
+Once the transaction callback is completed (for non-nested transaction calls), the transaction will commit, and if the callback throws an error, the transaction will abort. However, the callback is called with the `transaction` object, which also provides the following methods and property (illustrated in the sketch below):
+
+* `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves.
+* `abort(): void` - Aborts the current transaction and resets it.
+* `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database.
+* `timestamp: number` - This is the timestamp associated with the current transaction.
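+
+As a sketch of how these might be used (assuming the `MyTable` table from the example above; `isValid` is a hypothetical validation helper, and the record shape is illustrative):
+
+```javascript
+await transaction(async (txn) => {
+	let record = await MyTable.get(1, txn);
+	if (!isValid(record)) {
+		// discard any writes made so far in this transaction
+		txn.abort();
+		return;
+	}
+	MyTable.put({ id: 1, checked: true }, txn);
+	// commit explicitly rather than waiting for the callback to complete
+	await txn.commit();
+});
+```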
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/index.md b/site/versioned_docs/version-4.4/technical-details/release-notes/index.md
new file mode 100644
index 00000000..4159c5ab
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/index.md
@@ -0,0 +1,231 @@
+---
+title: Release Notes
+---
+
+# Release Notes
+
+### Current Release
+
+[Meet Tucker](./v4-tucker/tucker) Our 4th Release Pup
+
+[4.4.26 Tucker](./v4-tucker/4.4.26)
+
+[4.4.25 Tucker](./v4-tucker/4.4.25)
+
+[4.4.24 Tucker](./v4-tucker/4.4.24)
+
+[4.4.23 Tucker](./v4-tucker/4.4.23)
+
+[4.4.22 Tucker](./v4-tucker/4.4.22)
+
+[4.4.21 Tucker](./v4-tucker/4.4.21)
+
+[4.4.20 Tucker](./v4-tucker/4.4.20)
+
+[4.4.19 Tucker](./v4-tucker/4.4.19)
+
+[4.4.18 Tucker](./v4-tucker/4.4.18)
+
+[4.4.17 Tucker](./v4-tucker/4.4.17)
+
+[4.4.16 Tucker](./v4-tucker/4.4.16)
+
+[4.4.15 Tucker](./v4-tucker/4.4.15)
+
+[4.4.14 Tucker](./v4-tucker/4.4.14)
+
+[4.4.13 Tucker](./v4-tucker/4.4.13)
+
+[4.4.12 Tucker](./v4-tucker/4.4.12)
+
+[4.4.11 Tucker](./v4-tucker/4.4.11)
+
+[4.4.10 Tucker](./v4-tucker/4.4.10)
+
+[4.4.9 Tucker](./v4-tucker/4.4.9)
+
+[4.4.8 Tucker](./v4-tucker/4.4.8)
+
+[4.4.7 Tucker](./v4-tucker/4.4.7)
+
+[4.4.6 Tucker](./v4-tucker/4.4.6)
+
+[4.4.5 Tucker](./v4-tucker/4.4.5)
+
+[4.4.4 Tucker](./v4-tucker/4.4.4)
+
+[4.4.3 Tucker](./v4-tucker/4.4.3)
+
+[4.4.2 Tucker](./v4-tucker/4.4.2)
+
+[4.4.1 Tucker](./v4-tucker/4.4.1)
+
+[4.4.0 Tucker](./v4-tucker/4.4.0)
+
+[4.3.38 Tucker](./v4-tucker/4.3.38)
+
+[4.3.37 Tucker](./v4-tucker/4.3.37)
+
+[4.3.36 Tucker](./v4-tucker/4.3.36)
+
+[4.3.35 Tucker](./v4-tucker/4.3.35)
+
+[4.3.34 Tucker](./v4-tucker/4.3.34)
+
+[4.3.33 Tucker](./v4-tucker/4.3.33)
+
+[4.3.32 Tucker](./v4-tucker/4.3.32)
+
+[4.3.31 Tucker](./v4-tucker/4.3.31)
+
+[4.3.30 Tucker](./v4-tucker/4.3.30)
+
+[4.3.29 Tucker](./v4-tucker/4.3.29)
+
+[4.3.28 Tucker](./v4-tucker/4.3.28)
+
+[4.3.27 Tucker](./v4-tucker/4.3.27)
+
+[4.3.26 Tucker](./v4-tucker/4.3.26)
+
+[4.3.25 Tucker](./v4-tucker/4.3.25)
+
+[4.3.24 Tucker](./v4-tucker/4.3.24)
+
+[4.3.23 Tucker](./v4-tucker/4.3.23)
+
+[4.3.22 Tucker](./v4-tucker/4.3.22)
+
+[4.3.21 Tucker](./v4-tucker/4.3.21)
+
+[4.3.20 Tucker](./v4-tucker/4.3.20)
+
+[4.3.19 Tucker](./v4-tucker/4.3.19)
+
+[4.3.18 Tucker](./v4-tucker/4.3.18)
+
+[4.3.17 Tucker](./v4-tucker/4.3.17)
+
+[4.3.16 Tucker](./v4-tucker/4.3.16)
+
+[4.3.15 Tucker](./v4-tucker/4.3.15)
+
+[4.3.14 Tucker](./v4-tucker/4.3.14)
+
+[4.3.13 Tucker](./v4-tucker/4.3.13)
+
+[4.3.12 Tucker](./v4-tucker/4.3.12)
+
+[4.3.11 Tucker](./v4-tucker/4.3.11)
+
+[4.3.10 Tucker](./v4-tucker/4.3.10)
+
+[4.3.9 Tucker](./v4-tucker/4.3.9)
+
+[4.3.8 Tucker](./v4-tucker/4.3.8)
+
+[4.3.7 Tucker](./v4-tucker/4.3.7)
+
+[4.3.6 Tucker](./v4-tucker/4.3.6)
+
+[4.3.5 Tucker](./v4-tucker/4.3.5)
+
+[4.3.4 Tucker](./v4-tucker/4.3.4)
+
+[4.3.3 Tucker](./v4-tucker/4.3.3)
+
+[4.3.2 Tucker](./v4-tucker/4.3.2)
+
+[4.3.1 Tucker](./v4-tucker/4.3.1)
+
+[4.3.0 Tucker](./v4-tucker/4.3.0)
+
+[4.2.8 Tucker](./v4-tucker/4.2.8)
+
+[4.2.7 Tucker](./v4-tucker/4.2.7)
+
+[4.2.6 Tucker](./v4-tucker/4.2.6)
+
+[4.2.5 Tucker](./v4-tucker/4.2.5)
+
+[4.2.4 Tucker](./v4-tucker/4.2.4)
+
+[4.2.3 Tucker](./v4-tucker/4.2.3)
+
+[4.2.2 Tucker](./v4-tucker/4.2.2)
+
+[4.2.1 Tucker](./v4-tucker/4.2.1)
+
+[4.2.0 Tucker](./v4-tucker/4.2.0)
+
+[4.1.2 Tucker](./v4-tucker/4.1.2)
+
+[4.1.1 Tucker](./v4-tucker/4.1.1)
+
+[4.1.0 Tucker](./v4-tucker/4.1.0)
+
+[4.0.7 Tucker](./v4-tucker/4.0.7)
+
+[4.0.6 Tucker](./v4-tucker/4.0.6)
+
+[4.0.5 Tucker](./v4-tucker/4.0.5)
+
+[4.0.4 Tucker](./v4-tucker/4.0.4)
+
+[4.0.3 Tucker](./v4-tucker/4.0.3)
+
+[4.0.2 Tucker](./v4-tucker/4.0.2)
+
+[4.0.1 Tucker](./v4-tucker/4.0.1)
+
+[4.0.0 Tucker](./v4-tucker/4.0.0)
+
+### Past Releases
+
+[Meet Monkey](./v3-monkey/) Our 3rd Release Pup
+
+[3.2.1 Monkey](./v3-monkey/3.2.1)
+
+[3.2.0 Monkey](./v3-monkey/3.2.0)
+
+[3.1.5 Monkey](./v3-monkey/3.1.5)
+
+[3.1.4 Monkey](./v3-monkey/3.1.4)
+
+[3.1.3 Monkey](./v3-monkey/3.1.3)
+
+[3.1.2 Monkey](./v3-monkey/3.1.2)
+
+[3.1.1 Monkey](./v3-monkey/3.1.1)
+
+[3.1.0 Monkey](./v3-monkey/3.1.0)
+
+[3.0.0 Monkey](./v3-monkey/3.0.0)
+
+***
+
+[Meet Penny](./v2-penny/) Our 2nd Release Pup
+
+[2.3.1 Penny](./v2-penny/2.3.1)
+
+[2.3.0 Penny](./v2-penny/2.3.0)
+
+[2.2.3 Penny](./v2-penny/2.2.3)
+
+[2.2.2 Penny](./v2-penny/2.2.2)
+
+[2.2.0 Penny](./v2-penny/2.2.0)
+
+[2.1.1 Penny](./v2-penny/2.1.1)
+
+***
+
+[Meet Alby](./v1-alby/) Our 1st Release Pup
+
+[1.3.1 Alby](./v1-alby/1.3.1)
+
+[1.3.0 Alby](./v1-alby/1.3.0)
+
+[1.2.0 Alby](./v1-alby/1.2.0)
+
+[1.1.0 Alby](./v1-alby/1.1.0)
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.1.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.1.0.md
new file mode 100644
index 00000000..b42514a2
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.1.0.md
@@ -0,0 +1,77 @@
+---
+title: 1.1.0
+sidebar_position: 89899
+---
+
+### HarperDB 1.1.0, Alby Release
+4/18/2018
+
+**Features**
+
+* Users & Roles:
+
+  * Limit/Assign access to all HarperDB operations
+
+  * Limit/Assign access to schemas, tables & attributes
+
+  * Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`)
+
+* Enhanced SQL parser
+
+  * Added extensive ANSI SQL support.
+
+  * Added Array function, which allows for converting relational data into Object/Hierarchical data
+
+  * `Distinct_Array` Function: allows for removing duplicates in the Array function.
+
+  * Enhanced SQL Validation: Improved validation around the structure of SQL, validating the schema, etc.
+
+  * 10x performance improvement on SQL statements.
+
+* Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON.
+
+* Added upgrade function to CLI
+
+* Added ability to perform bulk update from CSV
+
+* Created landing page for HarperDB.
+
+* Added CORS support to HarperDB
+
+**Fixes**
+
+* Fixed memory leak in CSV bulk loads
+
+* Corrected error when attempting to perform a `SQL DELETE`
+
+* Added further validation to NoSQL `UPDATE` to validate schema & table exist
+
+* Fixed install issue where, if part of the install path did not exist, the install would silently fail.
+
+* Fixed issues with replicated data when one of the replicas is down
+
+* Removed logging of initial user's credentials during install
+
+* Can now use reserved words as aliases in SQL
+
+* Removed user(s) password in results when calling `list_users`
+
+* Corrected forwarding of operations to other nodes in a cluster
+
+* Corrected lag in schema meta-data passing to other nodes in a cluster
+
+* Drop table & drop schema now move the table or schema to the trash folder under the database folder for later permanent deletion.
+
+* Bulk inserts no longer halt the entire operation if n records already exist; instead, the response includes the hashes of the records that were skipped.
+
+* Added ability to accept EULA from command line
+
+* Corrected `search_by_value` not searching on the correct attribute
+
+* Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js
+
+* Added error handling for errors resulting from SQL calculations.
+
+* Standardized error responses as JSON.
+
+* Corrected internal process generation to not allow more processes than the machine has cores.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.2.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.2.0.md
new file mode 100644
index 00000000..095bf239
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.2.0.md
@@ -0,0 +1,42 @@
+---
+title: 1.2.0
+sidebar_position: 89799
+---
+
+### HarperDB 1.2.0, Alby Release
+7/10/2018
+
+**Features**
+
+* Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time.
+* Geo: HarperDB has integrated turf.js into its SQL parser to enable geo-based analytics.
+* Jobs: CSV data loads, Exports & Time to Live now all run as background jobs.
+* Exports: Perform queries that export into JSON or CSV and save to disk or S3.
+
+**Fixes**
+
+* Fixed issue where CSV data loads incorrectly reported the number of records loaded.
+* Added validation to stop `BETWEEN` operations in SQL.
+* Updated logging to not include internal variables in the logs.
+* Cleaned up `add_role` response to not include internal variables.
+* Removed old and unused dependencies.
+* Built out further unit tests and integration tests.
+* Fixed HTTPS to handle certificates properly.
+* Improved stability of clustering & replication.
+* Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response.
+* Fixed issue where Blob text was not being returned from `SQL SELECT`s.
+* Fixed error being returned when querying a table with no data; now correctly returns an empty array.
+* Improved performance in SQL when searching on exact values.
+* Fixed error when `./harperdb stop` is called.
+* Fixed logging issue causing instability in the installer.
+* Fixed `read_log` operation to accept date time.
+* Added permissions checking to `export_to_s3`.
+* Added ability to run a SQL `SELECT` without a `FROM`.
+* Fixed issue where updating a user's password was not encrypting properly.
+* Fixed `user_guide.html` to point to the README in the git repo.
+* Created option to have HarperDB run as a foreground process.
+* Updated `user_info` to return the correct role for a user.
+* Fixed issue where HarperDB would not stop if the database root was deleted.
+* Corrected error message on insert if an invalid schema is provided.
+* Added permissions checks for user & role operations.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.3.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.3.0.md
new file mode 100644
index 00000000..ad196159
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.3.0.md
@@ -0,0 +1,27 @@
+---
+title: 1.3.0
+sidebar_position: 89699
+---
+
+### HarperDB 1.3.0, Alby Release
+11/2/2018
+
+**Features**
+
+* Upgrade: Upgrade to newest version via command line.
+* SQL Support: Added `IS NULL` for SQL parser.
+* Added attribute validation to search operations.
+
+**Fixes**
+
+* Fixed `SELECT` calculations, i.e. `SELECT 2+2`.
+* Fixed `SELECT` with `OR` not returning expected results.
+* No longer allowing reserved words for schema and table names.
+* Corrected process interruptions from improper SQL statements.
+* Improved message handling between spawned processes that replace killed processes.
+* Enhanced error handling for updates to tables that do not exist.
+* Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes.
+* Fixed issue with new columns not being updated properly in update statements.
+* Now validating roles, tables and attributes when creating or updating roles.
+* Fixed an issue where in some cases `undefined` was being returned after dropping a role.
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.3.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.3.1.md
new file mode 100644
index 00000000..77e3ffe4
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/1.3.1.md
@@ -0,0 +1,29 @@
+---
+title: 1.3.1
+sidebar_position: 89698
+---
+
+### HarperDB 1.3.1, Alby Release
+2/26/2019
+
+**Features**
+
+* Clustering connection direction appointment
+* Foundations for threading/multiprocessing
+* UUID autogen for hash attributes that were not provided
+* Added cluster status operation
+
+**Bug Fixes and Enhancements**
+
+* More logging
+* Clustering communication enhancements
+* Clustering queue ordering by timestamps
+* Cluster reconnection enhancements
+* Number of system core(s) detection
+* Node LTS (10.15) compatibility
+* Update/Alter users enhancements
+* General performance enhancements
+* Warning is logged if different versions of HarperDB are connected via clustering
+* Fixed need to restart after user creation/alteration
+* Fixed SQL error that occurred on selecting from an empty table
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/_category_.json b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/_category_.json
new file mode 100644
index 00000000..e33195ec
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Alby (Version 1)",
+  "position": -1
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/index.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/index.md
new file mode 100644
index 00000000..60659623
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v1-alby/index.md
@@ -0,0 +1,13 @@
+---
+title: HarperDB Alby (Version 1)
+---
+
+# HarperDB Alby (Version 1)
+
+Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup.
+
+Here is a bit about Alby:
+
+![picture of black dog](/img/v4.4/dogs/alby.webp)
+
+_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sunbathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.1.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.1.1.md
new file mode 100644
index 00000000..e1314a5f
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.1.1.md
@@ -0,0 +1,27 @@
+---
+title: 2.1.1
+sidebar_position: 79898
+---
+
+### HarperDB 2.1.1, Penny Release
+05/22/2020
+
+**Highlights**
+
+* CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values.
+* CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to aliases in `ORDER BY` statements.
+* CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values.
+* CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas.
+* CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function.
+* CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. The same rules now apply for hash attribute values.
+* CORE-1003 Fixed handling of `ORDER BY` statements with function aliases.
+* CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name.
+* CORE-996 Fixed an issue where the `transact_to_cluster` flag was lost for CSV URL loads, and an issue where new attributes created in CSV bulk loads did not sync to the cluster.
+* CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, cpu, disk, network.
+* CORE-993 Added new custom date functions for AlaSQL & UTC updates.
+* CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process.
+* CORE-992 HTTPS enabled by default.
+* CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage.
+* CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime.
+* CORE-975 Updated time values set by SQL Date Functions to be in epoch format.
+* CORE-974 Added date functions to `SQL SELECT` column alias functionality.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.0.md
new file mode 100644
index 00000000..267168cd
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.0.md
@@ -0,0 +1,43 @@
+---
+title: 2.2.0
+sidebar_position: 79799
+---
+
+### HarperDB 2.2.0, Penny Release
+08/24/2020
+
+**Features/Updates**
+
+* CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up fewer resources
+* CORE-1018 Adds SQL functionality for `BETWEEN` statements
+* CORE-1032 Updates permissions to allow regular users (i.e. non-super users) to call the `get_job` operation
+* CORE-1036 On create/drop table we auto-create/drop the related transaction environments for the schema.table
+* CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations
+* CORE-1057 Implemented write transaction into LMDB create/update/delete functions
+* CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards
+* CORE-1059 Added config setting to disable transaction logging for an instance
+* CORE-1076 Adds permissions filter to describe operations
+* CORE-1043 Change clustering catchup to use the new transaction log
+* CORE-1052 Removed word "master" from source
+* CORE-1061 Added new operation called `delete_transactions_before`, which will tail a transaction log for a specific schema/table
+* CORE-1040 On HarperDB startup make sure all tables have a transaction environment
+* CORE-1055 Added 2 new settings to change the server `headersTimeout` & `keepAliveTimeout` from the config file
+* CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value`
+* CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count
+* CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL.
+* CORE-1054 Updates permissions object to remove delete attribute permission and updates the table attribute permission key to `attribute_permissions`
+* CORE-1092 Do not allow the `__createdtime__` to be updated
+* CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate
+* CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found
+* CORE-1049 Updates response message for SQL update op with no matching rows
+* CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user.
+
+**Bug Fixes**
+
+* CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query
+* CORE-1037 Fixed an issue where numbers with leading zeros (i.e. 00123) were converted to numbers rather than being honored as strings.
+* CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo
+* CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped.
+* CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records.
+* CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.2.md
new file mode 100644
index 00000000..827c63db
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.2.md
@@ -0,0 +1,16 @@
+---
+title: 2.2.2
+sidebar_position: 79797
+---
+
+### HarperDB 2.2.2, Penny Release
+10/27/2020
+
+* CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
+* CORE-1153 Fixed issue where `delete_files_before` was writing to the transaction log.
+* CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created.
+* CORE-1112 Adds handling for system timestamp attributes in permissions.
+* CORE-1131 Adds better handling for checking permissions on operations with action value in JSON.
+* CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions.
+* CORE-1135 Adds validation for valid keys in role API operations.
+* CORE-1073 Adds new `import_from_s3` operation to API.
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.3.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.3.md
new file mode 100644
index 00000000..eca953e2
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.2.3.md
@@ -0,0 +1,9 @@
+---
+title: 2.2.3
+sidebar_position: 79796
+---
+
+### HarperDB 2.2.3, Penny Release
+11/16/2020
+
+* CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause in between.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.3.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.3.0.md
new file mode 100644
index 00000000..2b248490
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.3.0.md
@@ -0,0 +1,22 @@
+---
+title: 2.3.0
+sidebar_position: 79699
+---
+
+### HarperDB 2.3.0, Penny Release
+12/03/2020
+
+**Features/Updates**
+
+* CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information)
+* CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation
+* CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings.
+* CORE-1175 Added new internal LMDB function to copy an environment for use in future features.
+* CORE-1166 Updated packages to address security vulnerabilities.
+
+**Bug Fixes**
+
+* CORE-1195 Modified `drop_attribute` to drop after data cleanse completes.
+* CORE-1149 Fixed a SQL bug regarding self joins and updated alasql to the 0.6.5 release.
+* CORE-1168 Fixed inconsistent invalid schema/table errors.
+* CORE-1162 Fixed a bug where `delete_files_before` caused tables to grow in size due to an open cursor issue.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.3.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.3.1.md
new file mode 100644
index 00000000..51291a01
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/2.3.1.md
@@ -0,0 +1,12 @@
+---
+title: 2.3.1
+sidebar_position: 79698
+---
+
+### HarperDB 2.3.1, Penny Release
+1/29/2021
+
+**Bug Fixes**
+
+* CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`.
+* CORE-1219 Increased maximum table attributes from 1000 to 10000
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/_category_.json b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/_category_.json
new file mode 100644
index 00000000..285eecf7
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Penny (Version 2)",
+  "position": -2
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/index.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/index.md
new file mode 100644
index 00000000..30b06241
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v2-penny/index.md
@@ -0,0 +1,13 @@
+---
+title: HarperDB Penny (Version 2)
+---
+
+# HarperDB Penny (Version 2)
+
+Did you know our release names are dedicated to employee pups? For our second release, Penny was the star.
+
+Here is a bit about Penny:
+
+![picture of brindle dog](/img/v4.4/dogs/penny.webp)
+
+_Hi, I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.0.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.0.0.md
new file mode 100644
index 00000000..2907ee6c
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.0.0.md
@@ -0,0 +1,31 @@
+---
+title: 3.0.0
+sidebar_position: 69999
+---
+
+### HarperDB 3.0, Monkey Release
+5/18/2021
+
+**Features/Updates**
+
+* CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation.
+* CORE-1304 Upgrade to Node 12.22.1.
+* CORE-1235 Adds new upgrade/install functionality.
+* CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance.
+* CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`.
+* CORE-1243 Change `HTTPS_ON` settings value to false by default.
+* CORE-1189 Implement fastify web server, resulting in improved performance.
+* CORE-1221 Update user API to use role name instead of role id.
+* CORE-1225 Updated dependencies to eliminate npm security warnings.
+* CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality.
+
+**Bug Fixes**
+
+* CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB.
+* CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log.
+* CORE-1285 Update warning log when trying to create an attribute that already exists.
+* CORE-1254 Added logic to manage data collisions in clustering.
+* CORE-1212 Add pre-check to `drop_user` that returns error if user doesn't exist.
+* CORE-1114 Update response code and message from `add_user` when user already exists.
+* CORE-1111 Update response from `create_attribute` to match the create schema/table response.
+* CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.0.md
new file mode 100644
index 00000000..148690f6
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.0.md
@@ -0,0 +1,23 @@
+---
+title: 3.1.0
+sidebar_position: 69899
+---
+
+### HarperDB 3.1.0, Monkey Release
+8/24/2021
+
+**Features/Updates**
+
+* CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions
+* CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions
+* CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement PM2 for HarperDB process management
+* CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments
+* CORE-1310 Updated licensing functionality
+* CORE-1301 Updated validation for performance improvement
+* CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds
+* CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES`
+
+**Bug Fixes**
+
+* CORE-1315 Corrected issue in HarperDB restart scenario
+* CORE-1370 Update some of the validation error handlers so that they don't log the full stack
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.1.md
new file mode 100644
index 00000000..0adbeb21
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.1.md
@@ -0,0 +1,18 @@
+---
+title: 3.1.1
+sidebar_position: 69898
+---
+
+### HarperDB 3.1.1, Monkey Release
+9/23/2021
+
+**Features/Updates**
+
+* CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart
+* CORE-1395 Create a setting which allows the local Studio to be served from an instance of HarperDB
+* CORE-1397 Update the stock 404 response to not return the request URL
+* General updates to optimize Docker container
+
+**Bug Fixes**
+
+* CORE-1399 Added fixes for complex SQL alias issues
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.2.md
new file mode 100644
index 00000000..f1c192b6
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.2.md
@@ -0,0 +1,15 @@
+---
+title: 3.1.2
+sidebar_position: 69897
+---
+
+### HarperDB 3.1.2, Monkey Release
+10/21/2021
+
+**Features/Updates**
+
+* Updated the installation ASCII art to reflect the new HarperDB logo
+
+**Bug Fixes**
+
+* CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number, causing tables to behave unexpectedly
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.3.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.3.md
new file mode 100644
index 00000000..2d484f8d
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.3.md
@@ -0,0 +1,11 @@
+---
+title: 3.1.3
+sidebar_position: 69896
+---
+
+### HarperDB 3.1.3, Monkey Release
+1/14/2022
+
+**Bug Fixes**
+
+* CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.4.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.4.md
new file mode 100644
index 00000000..ae0074fd
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.4.md
@@ -0,0 +1,11 @@
+---
+title: 3.1.4
+sidebar_position: 69895
+---
+
+### HarperDB 3.1.4, Monkey Release
+2/24/2022
+
+**Features/Updates**
+
+* CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.5.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.5.md
new file mode 100644
index 00000000..eff4b5b0
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.1.5.md
@@ -0,0 +1,11 @@
+---
+title: 3.1.5
+sidebar_position: 69894
+---
+
+### HarperDB 3.1.5, Monkey Release
+3/4/2022
+
+**Features/Updates**
+
+* CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which tried to convert them to numbers but returned NaN instead.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.2.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.2.0.md
new file mode 100644
index 00000000..003575d8
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.2.0.md
@@ -0,0 +1,13 @@
+---
+title: 3.2.0
+sidebar_position: 69799
+---
+
+### HarperDB 3.2.0, Monkey Release
+3/25/2022
+
+**Features/Updates**
+
+* CORE-1391 Bug fix related to orphaned HarperDB background processes.
+* CORE-1509 Updated node version check, updated Node.js version, updated project dependencies.
+* CORE-1518 Remove final call from logger.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.2.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.2.1.md
new file mode 100644
index 00000000..dc511a70
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.2.1.md
@@ -0,0 +1,11 @@
+---
+title: 3.2.1
+sidebar_position: 69798
+---
+
+### HarperDB 3.2.1, Monkey Release
+6/1/2022
+
+**Features/Updates**
+
+* CORE-1573 Added logic to track the pid of the foreground process if running in the foreground; on stop, that pid is used to kill the process. Logic was also added to kill the PM2 daemon when stop is called.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.3.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.3.0.md
new file mode 100644
index 00000000..3e3ca784
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/3.3.0.md
@@ -0,0 +1,12 @@
+---
+title: 3.3.0
+sidebar_position: 69699
+---
+
+### HarperDB 3.3.0 - Monkey
+
+* CORE-1595 Added new role type `structure_user`, which enables non-superusers to create/drop schemas, tables, and attributes.
+* CORE-1501 Improved performance for `drop_table`.
+* CORE-1599 Added two new operations for custom functions: `install_node_modules` & `audit_node_modules`.
+* CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`.
+* CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, Python are no longer required when installing on these platforms.
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/_category_.json b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/_category_.json
new file mode 100644
index 00000000..0103ac36
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Monkey (Version 3)",
+  "position": -3
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/index.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/index.md
new file mode 100644
index 00000000..e9f2b84c
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v3-monkey/index.md
@@ -0,0 +1,11 @@
+---
+title: HarperDB Monkey (Version 3)
+---
+
+# HarperDB Monkey (Version 3)
+
+Did you know our release names are dedicated to employee pups? For our third release, we have Monkey.
+
+![picture of tan dog](/img/v4.4/dogs/monkey.webp)
+
+_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to my home. I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.0.md
new file mode 100644
index 00000000..49770307
--- /dev/null
+++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.0.md
@@ -0,0 +1,124 @@
+---
+title: 4.0.0
+sidebar_position: 59999
+---
+
+### HarperDB 4.0.0, Tucker Release
+11/2/2022
+
+**Networking & Data Replication (Clustering)**
+
+The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise grade connective technology responsible for addressing, discovery and exchanging of messages that drive the common patterns in distributed systems.
+* CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them.
+* CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` as a dependency; on install of HarperDB, download `nats-server` if possible, else fall back to building from source code.
+* CORE-1593, CORE-1761: Add `nats.js` as project dependency.
+* CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration.
+* CORE-1467, CORE-1508: Launch and manage NATS servers with PM2.
+* CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions.
+* CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs.
+* CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS.
+* CORE-1504: Update existing hooks to save transactions to work with NATS.
+* CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now manage dynamic sourcing of table-level transaction streams between nodes and work queues.
+* CORE-1522: Create `NATSReplyService` process which handles receiving NATS-based requests from remote instances and sending back appropriate responses.
+* CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation.
+* CORE-1611: Update pre-existing transaction log operations to be audit log operations.
+* CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams.
+* CORE-1668: Update NATS serialization/deserialization to use MessagePack.
+* CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`.
+* CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & a clustering-only restart call the NATS server with a reload directive to maintain full uptime while servers refresh.
+* CORE-1474: HarperDB install adds clustering folder structure.
+* CORE-1530: Post `drop_table`, HarperDB purges the related transaction stream.
+* CORE-1567: Set NATS config to always use TLS.
+* CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate.
+* CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`, which enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB.
+* CORE-1535: Create work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB, which are then ingested in order.
+* CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`.
+* CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery.
+* CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not.
+* CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time from which to synchronize transactions.
+* CORE-1785: Optimizations and bug fixes with regard to sourcing data from remote instances of HarperDB.
+* CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together.
+* CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh.
+* CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh.
+* CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to new hub server port.
+* CORE-1609: Allow `remove_node` to be called when the other node cannot be reached.
+* CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid a concurrent NATS source update bug.
+* CORE-1848: Update stream configs if the node name has been changed in the YAML configuration.
+* CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on both the local and remote node, respectively
+
+**Data Storage**
+
+We have made improvements to how we store, index, and retrieve data.
+* CORE-1619: Enabled new concurrent flushing technology for improved write performance.
+* CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions.
+* CORE-1652: Encode the values of secondary indices more efficiently for faster access.
+* CORE-1670: Store updated timestamp in the `lmdb.js` version property.
+* CORE-1651: Enabled multiple-value indexing of array values, which allows searching on specific elements in an array more efficiently.
+* CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance.
+* Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput.
+* CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values.
+* CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with `get_attributes`) without incurring the cost of decoding the entire object.
+* CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data.
+* CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates.
+* CORE-1653: Upgrade to 4.0 rebuilds tables to reflect the index improvements.
+* CORE-1753: Removed old `node-lmdb` dependency.
+* CORE-1787: Freeze objects returned from queries.
+* CORE-1821: Read the `WRITE_ASYNC` setting, which enables LMDB nosync.
+
+**Logging**
+
+HarperDB has increased logging specificity by breaking out logs based on the component doing the logging. There are specific log files for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more.
+* CORE-1497: Remove `pino` and `winston` dependencies.
+* CORE-1426: All logging is output via `stdout` and `stderr`; our default logging is then picked up by PM2, which handles writing out to file.
+* CORE-1431: Improved `read_log` operation validation.
+* CORE-1433, CORE-1463: Added log rotation.
+* CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration.
+* CORE-1436: Upgrade to 4.0 updates internals for logging changes.
+* CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests.
+* CORE-1691: Convert non-PM2 managed log file writes to use Node.js `fs.appendFileSync` function.
+
+**Configuration**
+
+HarperDB has updated its configuration from a properties file to YAML.
+* CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML.
+* CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file.
+* Installer has updated prompts to reflect YAML settings.
+* CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`.
+* CORE-1461, CORE-1462, CORE-1483: Unit test improvements.
+* CORE-1492: Improvements to `get_configuration` and `set_configuration` operations.
+* CORE-1503: Modify HarperDB configuration for more granular certificate definition.
+* CORE-1591: Update `routes` IP param to `host` and move it to the `leaf` config in `harperdb.conf`
+* CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on `npm install`.
+
+**Broad Node.js and Platform Support**
+* CORE-1624: HarperDB can now run on multiple versions of Node.js, from v14 to v19. We primarily test on v18, so that is the preferred version.
+
+**Windows 10 and 11**
+* CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL. Windows is only intended for evaluation and development purposes, not for production workloads.
+
+**Extra Changes and Bug Fixes**
+* CORE-1520: Refactor installer to remove all waterfall code and update to use Promises.
+* CORE-1573: Stop the PM2 daemon and any logging processes when stopping HarperDB.
+* CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned.
+* CORE-1626: Update Dockerfile to accommodate new `harperdb.conf` file.
+* CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for CI/CD pipeline and integration tests.
+* CORE-1661: Fixed issue where old boot properties file caused an error when attempting to install 4.0.0.
+* CORE-1697, CORE-1814, CORE-1855: Upgrade the Fastify dependency to new major version 4. +* CORE-1629: Jobs now run as processes managed by the PM2 daemon. +* CORE-1733: Update LICENSE to reflect our EULA on our site. +* CORE-1606: Enable Custom Functions by default. +* CORE-1714: Include pre-built binaries for most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64). +* CORE-1628: Fix issue where setting the license through an environment variable was not working. +* CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements. +* CORE-1706: Add support for encoding HTTP responses with MessagePack. +* CORE-1709: Improve the way lmdb.js dependencies are installed. +* CORE-1758: Remove/update unnecessary HTTP headers. +* CORE-1756: On `npm install` and `harperdb install`, change the Node version check from an error to a warning if the installed Node.js version does not match our preferred version. +* CORE-1791: Optimizations to authenticated user caching. +* CORE-1794: Update README to discuss Windows support and Node.js versions. +* CORE-1837: Fix issue where the Custom Function directory was not being created on install. +* CORE-1742: Add more validation to the audit log: check that the schema/table exists and the log is enabled. +* CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`. +* CORE-1864: Fix to semver checks on upgrade. +* CORE-1850: Fix issue where a `cluster_user` type role could not be altered. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.1.md new file mode 100644 index 00000000..9e148e63 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.1.md @@ -0,0 +1,12 @@ +--- +title: 4.0.1 +sidebar_position: 59998 +--- + +### HarperDB 4.0.1, Tucker Release +01/20/2023 + +**Bug Fixes** + +* CORE-1992 Local studio was not loading because the path got mangled in the build. +* CORE-2001 Fixed `deploy_custom_function_project` after a Node update broke it. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.2.md new file mode 100644 index 00000000..b65d1427 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.2.md @@ -0,0 +1,12 @@ +--- +title: 4.0.2 +sidebar_position: 59997 +--- + +### HarperDB 4.0.2, Tucker Release +01/24/2023 + +**Bug Fixes** + +* CORE-2003 Fix bug where, if the machine had one core, the thread config would default to zero. +* Update to lmdb 2.7.3 and msgpackr 1.7.0 diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.3.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.3.md new file mode 100644 index 00000000..67aaae56 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.3.md @@ -0,0 +1,11 @@ +--- +title: 4.0.3 +sidebar_position: 59996 +--- + +### HarperDB 4.0.3, Tucker Release +01/26/2023 + +**Bug Fixes** + +* CORE-2007 Add the update-nodes 4.0.0 launch script to the build script to fix the clustering upgrade.
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.4.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.4.md new file mode 100644 index 00000000..2a30c9d1 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.4.md @@ -0,0 +1,11 @@ +--- +title: 4.0.4 +sidebar_position: 59995 +--- + +### HarperDB 4.0.4, Tucker Release +01/27/2023 + +**Bug Fixes** + +* CORE-2009 Fixed bug where `add_node` was not being called when upgrading clustering. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.5.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.5.md new file mode 100644 index 00000000..dc66721f --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.5.md @@ -0,0 +1,14 @@ +--- +title: 4.0.5 +sidebar_position: 59994 +--- + +### HarperDB 4.0.5, Tucker Release +02/15/2023 + +**Bug Fixes** + +* CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to the upgrade to determine whether new certificates should be created or existing certificates should be kept/used. +* Fix the way NATS connections are honored in a local environment. +* Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. + diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.6.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.6.md new file mode 100644 index 00000000..bf97d148 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.6.md @@ -0,0 +1,11 @@ +--- +title: 4.0.6 +sidebar_position: 59993 +--- + +### HarperDB 4.0.6, Tucker Release +03/09/2023 + +**Bug Fixes** + +* Fixed a data serialization error that occurred when a large number of different record structures were persisted in a single table. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.7.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.7.md new file mode 100644 index 00000000..7d48666a --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.0.7.md @@ -0,0 +1,11 @@ +--- +title: 4.0.7 +sidebar_position: 59992 +--- + +### HarperDB 4.0.7, Tucker Release +03/10/2023 + +**Bug Fixes** + +* Update lmdb.js dependency \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.0.md new file mode 100644 index 00000000..eaa825a8 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.0.md @@ -0,0 +1,63 @@ +--- +title: 4.1.0 +sidebar_position: 59899 +--- + +# 4.1.0 + +HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously, this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation (with support for optimized load tracking and session affinity), better debuggability, and a reduced memory footprint. + +This means debugging will be much easier for custom functions.
If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, set breakpoints in your custom functions, and debug them. + +The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread, which can improve caching locality, performance, and fairness. This can be enabled with the [`http.sessionAffinity` option in your configuration](../../../deployments/configuration#http). + +HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory-efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in a faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent) and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and can provide a means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade: all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or `for...of` loops to handle query results; see the sketch at the end of this overview). + +4.1 includes configuration options for specifying the location of database storage files. This allows you to locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../../deployments/configuration#storage) and [schemas configuration](../../../../deployments/configuration#schemas) for information on how to configure these locations. + +Logging has been revamped and condensed into one `hdb.log` file. See the logging documentation for more information. + +A new operation called `cluster_network` was added; it pings the cluster and returns a list of enmeshed nodes. + +Custom Functions will no longer automatically load static file routes; instead, the `@fastify/static` plugin will need to be registered with the Custom Function server. See [Host A Static Web UI](https://docs.harperdb.io/docs/v/4.1/custom-functions/host-static). + +Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object, it should be referenced in the `key` parameter. See examples [here](../../../developers/operations-api/bulk-operations#import-from-s3). + +Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our `import_from_s3` and `export_to_s3` operations: + +* A new attribute `region` will need to be supplied +* The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`. + +Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard Unix tooling to interact with the process, and it is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`.
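+
+For custom function authors, here is a minimal sketch of what the iterator change means in practice (this assumes the standard Custom Functions route template and its `hdbCore.requestWithoutAuthentication` helper; the `dev.dog` table and `dog_name` attribute are illustrative, not part of any real project):
+
+```javascript
+// routes/example.js — sketch only; schema/table names are examples.
+module.exports = async (server, { hdbCore }) => {
+  server.route({
+    url: '/dog-names',
+    method: 'GET',
+    handler: async (request) => {
+      request.body = { operation: 'sql', sql: 'SELECT * FROM dev.dog' };
+      const results = await hdbCore.requestWithoutAuthentication(request);
+      // In 4.1, `results` is an iterator rather than an array, so results[0]
+      // will not work; iterate instead (or spread into an array).
+      const names = [];
+      for (const record of results) {
+        names.push(record.dog_name);
+      }
+      return names;
+    },
+  });
+};
+```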
+ +Internal Tickets completed: + +* CORE-609 - Ensure that attribute names are always added to the global schema as Strings +* CORE-1549 - Remove fastify-static code from the Custom Functions server, which auto-serves content from the "static" folder +* CORE-1655 - Iterator-based queries +* CORE-1764 - Fix issue where the `describe_all` operation returns an empty object for non-super-users if schema(s) do not yet have table(s) +* CORE-1854 - Switch to using worker threads instead of processes for handling concurrency +* CORE-1877 - Extend the `csv_url_load` operation to allow for additional headers to be passed to the remote server when the CSV is being downloaded +* CORE-1893 - Add last updated timestamp to describe operations +* CORE-1896 - Fix issue where `SELECT * FROM system.hdb_info` returns the wrong HDB version number after instance upgrade +* CORE-1904 - Fix issue when executing a GeoJSON query in SQL +* CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams +* CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. +* CORE-1655 - Streaming binary format serialization +* CORE-1943 - Add configuration option to set the mount point for audit tables +* CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. +* CORE-1963 - Update logging for better readability, reduced duplication, and request context information. +* CORE-1968 - In `server/nats/natsIngestService.js`, remove the `js_msg.working();` line to improve performance. +* CORE-1976 - Fix error when calling the `describe_table` operation with no schema or table defined in the payload. +* CORE-1983 - Fix issue where the `create_attribute` operation does not validate the request for required attributes +* CORE-2015 - Remove PM2 logs that get logged in the console when starting HDB +* CORE-2048 - systemd script for 4.1 +* CORE-2052 - Include thread information in `system_information` for visibility of threads +* CORE-2061 - Add a better error message when clustering is enabled without a cluster user set +* CORE-2068 - Create new log rotation logic since pm2 log-rotate is no longer used +* CORE-2072 - Update to Node 18.15.0 +* CORE-2090 - Upgrade testing from v4.0.x and v3.x to v4.1. +* CORE-2091 - Run the performance tests +* CORE-2092 - Allow for automatic patch version updates of certain packages +* CORE-2109 - Add verify option to clustering TLS configuration +* CORE-2111 - Update AWS SDK to v3 diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.1.md new file mode 100644 index 00000000..537ef71c --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.1.md @@ -0,0 +1,15 @@ +--- +title: 4.1.1 +sidebar_position: 59898 +--- + +# 4.1.1 + +06/16/2023 + +* HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory-constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration).
+* An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration), which can provide improved replication performance in a fully connected network. +* Improvements to our OpenShift container. +* Dependency security updates. + +**Bug Fixes** +* Fixed a bug in reporting database metrics in the `system_information` operation. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.2.md new file mode 100644 index 00000000..2a62db64 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.1.2.md @@ -0,0 +1,13 @@ +--- +title: 4.1.2 +sidebar_position: 59897 +--- + +### HarperDB 4.1.2, Tucker Release +06/16/2023 + +* HarperDB has updated binary dependencies to support older glibc versions back to 2.17. +* A new CLI command was added to report whether HarperDB is running and the status of the cluster. This is available with `harperdb status`. +* Improvements to our OpenShift container. +* Dependency security updates. + diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.0.md new file mode 100644 index 00000000..55bfe220 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.0.md @@ -0,0 +1,99 @@ +--- +title: 4.2.0 +sidebar_position: 59799 +--- + +# 4.2.0 + +#### HarperDB 4.2.0 + +HarperDB 4.2 introduces a new interface for accessing our core database engine, with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 also adopts a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities, including a new REST interface, advanced caching, and real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events. + +4.2 also introduces configurable database schemas, using GraphQL schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting started guide](../../../getting-started) to see how easy it is to get started with HarperDB apps. + +### Resource API + +The [Resource API](../../reference/resource) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB databases/tables and is designed to be easily implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real-time. The [Resource API documentation details this interface](../../reference/resource). + +### Component Architecture + +HarperDB's custom functions have evolved towards a [full component architecture](../../../developers/components); our internal functionality is defined as components, and this can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a well-defined interface for creating your own components. Components can easily be deployed/installed into HarperDB using NPM and GitHub references as well.
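+
+As a rough sketch of the shape of a user component under this architecture (the `tables` export and the pattern of extending a table class come from the Resource API documentation linked above; the `Dog` table and its attributes are illustrative):
+
+```javascript
+// resources.js — illustrative sketch, not verbatim documentation.
+// Assumes this component's schema defines a Dog table with an `age` attribute.
+import { tables } from 'harperdb';
+
+const { Dog } = tables;
+
+// Extending the table's class customizes application logic for table access;
+// here, reads gain a derived attribute computed on the way out.
+export class DogWithHumanAge extends Dog {
+  get(query) {
+    this.humanAge = this.age * 7; // derived on read, not stored
+    return super.get(query);
+  }
+}
+```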
+### Configurable Database Schemas + +HarperDB applications or components support [schema definitions using GraphQL schema syntax](../../../../developers/applications/defining-schemas). This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with application code. + +### REST Interface + +HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs, using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs, and more. With standards-based header support built in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers. + +### Real-Time + +HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database-style data updates and querying, along with message delivery. [Real-time messaging](../../../../developers/real-time) of data is available through several protocols: + +#### MQTT + +4.2 now includes support for MQTT, a publish-and-subscribe messaging protocol designed for efficiency (efficient enough for even small Internet of Things devices). This allows clients to connect to HarperDB, publish messages through our data center, and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions. + +#### WebSockets + +HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling. + +#### Server-Sent Events + +HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows websites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP. + +### Database Structure + +HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally-consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are also maintained in the same database with atomic consistency. + +Databases are now entirely encapsulated in a file, which means they can be moved/copied to another instance without requiring any separate metadata updates in the system tables. + +### Clone Node + +HarperDB includes new functionality for adding new HarperDB nodes to a cluster. New instances can be configured to clone from a leader node, performing and copying a database snapshot from the leader node, and self-configuring from the leader node as well, to facilitate accelerated deployment of new nodes for fast horizontal scaling to meet demand.
[See the documentation on Clone Node for more information.](../../../../administration/cloning) + +### Operations API terminology updates + +Any operation that used the `schema` property was updated to make this property optional and alternately support `database` as the property for specifying the database (formerly 'schema'). If both `schema` and `database` are absent, the operation defaults to using the `data` database. The term 'primary key' is now used in place of 'hash', and the NoSQL operation `search_by_hash` has been updated to `search_by_id`. + +Support was added for defining a table with `primary_key` instead of `hash_attribute`. + +## Configuration + +There have been significant changes to `harperdb-config.yaml`; however, none of these changes should affect pre-4.2 versions. If you upgrade to 4.2, any existing configuration should be backwards compatible and will not need to be updated. + +`harperdb-config.yaml` has had some configuration values added, removed, and renamed, and some defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration) for the most current configuration parameters. + +* The `http` element has been expanded. + * `compressionThreshold` was added. + * All `customFunction` configuration now lives here, except for the `tls` section. +* `threads` has moved out of the `http` element and is now its own top-level element. +* The `authentication` section was moved out of the `operationsApi` section and is now its own top-level element/section. +* `analytics.aggregatePeriod` was added. +* The default logging level was changed to `warn`. +* The default clustering log level was changed to `info`. +* `clustering.republishMessages` now defaults to `false`. +* `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`. +* Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section. +* Added a `securePort` parameter to `operationsApi` and `http`, used for setting the HTTPS port. +* Added a new top-level `tls` section. +* Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`. +* Added an element called `componentRoot`, which replaces `customFunctions.root`. +* Updated custom pathing to use `databases` instead of `schemas`. +* Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events. +* A new `mqtt` section was added. + +### Socket Management + +HarperDB now uses socket sharing (`SO_REUSEPORT`) to distribute incoming connections to different threads. This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity-based socket delegation. + +HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single-port configuration. + +### Sessions + +HarperDB now supports cookie-based sessions for authenticating web clients. These can be used with the standard authentication mechanisms to log in, after which cookies preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials.
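+
+As a browser-side sketch of how a cookie-based session can be used (illustrative only: the host, port, and `Dog` table are assumptions, and the paths follow the REST interface described above):
+
+```javascript
+// Authenticate once with a standard mechanism (Basic auth here); the response
+// can establish a session cookie that the browser stores.
+async function login(user, password) {
+  await fetch('https://server:9926/Dog/', {
+    headers: { Authorization: 'Basic ' + btoa(`${user}:${password}`) },
+    credentials: 'include', // let the browser keep the session cookie
+  });
+}
+
+// Subsequent requests ride the session cookie instead of resending credentials.
+async function getDog(id) {
+  const response = await fetch(`https://server:9926/Dog/${id}`, {
+    credentials: 'include',
+  });
+  return response.json();
+}
+```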
+ +### Dev Mode + +HarperDB can now directly run a HarperDB application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting with any changes in your application files. Dev mode is recommended for local application and component development. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.1.md new file mode 100644 index 00000000..38617ca9 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.1.md @@ -0,0 +1,13 @@ +--- +title: 4.2.1 +sidebar_position: 59798 +--- + +### HarperDB 4.2.1, Tucker Release +11/3/2023 + +* Downgrade NATS 2.10.3 back to 2.10.1 due to a regression in connection handling. +* Handle package names with underscores. +* Improved validation of queries and comparators. +* Avoid double replication on transactions with multiple commits. +* Added file metadata on `get_component_file`. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.2.md new file mode 100644 index 00000000..15768374 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.2.md @@ -0,0 +1,15 @@ +--- +title: 4.2.2 +sidebar_position: 59797 +--- + +### HarperDB 4.2.2, Tucker Release +11/8/2023 + +* Increase timeouts for NATS connections. +* Fix for database snapshots for backups (and for clone node). +* Fix application of permissions for default tables exposed through REST. +* Log replication failures with record information. +* Fix application of authorization/permissions for MQTT commands. +* Fix copying of local components in clone node. +* Fix calculation of overlapping start time in clone node.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.3.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.3.md new file mode 100644 index 00000000..dab25c3d --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.3.md @@ -0,0 +1,13 @@ +--- +title: 4.2.3 +sidebar_position: 59796 +--- + +### HarperDB 4.2.3, Tucker Release +11/15/2023 + +* When setting `securePort`, disable the insecure port setting on the same port +* Fix `harperdb status` when the pid file is missing +* Fix/include missing icons/fonts from local studio +* Fix crash that can occur when concurrently accessing records > 16KB +* Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.4.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.4.md new file mode 100644 index 00000000..87ee241d --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.4.md @@ -0,0 +1,10 @@ +--- +title: 4.2.4 +sidebar_position: 59795 +--- + +### HarperDB 4.2.4, Tucker Release +11/16/2023 + +* Prevent coercion of strings to numbers in SQL queries (in the WHERE clause) +* Address fastify deprecation warning about accessing config \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.5.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.5.md new file mode 100644 index 00000000..1172c4b3 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.5.md @@ -0,0 +1,12 @@ +--- +title: 4.2.5 +sidebar_position: 59794 +--- + +### HarperDB 4.2.5, Tucker Release +11/22/2023 + +* Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery) +* Update the `geoNear` function to tolerate null values +* lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved +* Add header to indicate the start of a new authenticated session (for studio to identify authenticated sessions) diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.6.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.6.md new file mode 100644 index 00000000..d0a1f177 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.6.md @@ -0,0 +1,10 @@ +--- +title: 4.2.6 +sidebar_position: 59793 +--- + +### HarperDB 4.2.6, Tucker Release +11/29/2023 + +* Update various geo SQL functions to tolerate invalid values +* Properly report component installation/load errors in `get_components` (for studio to load components after an installation failure) \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.7.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.7.md new file mode 100644 index 00000000..78bfcaa7 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.7.md @@ -0,0 +1,11 @@ +--- +title: 4.2.7 +sidebar_position: 59792 +--- + +### HarperDB 4.2.7 +12/6/2023 + +* Add support for cloning over the top of an existing HarperDB instance +* Add health checks for the NATS consumer with the ability to restart consumer loops for better resiliency +* Revert the
Fastify autoload module due to a regression that caused ECMAScript modules for Fastify routes to fail to load on Windows \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.8.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.8.md new file mode 100644 index 00000000..fbe94b69 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.2.8.md @@ -0,0 +1,14 @@ +--- +title: 4.2.8 +sidebar_position: 59791 +--- + +### HarperDB 4.2.8 +12/19/2023 + +* Added support for CLI command-line arguments for clone node +* Added support for cloning a node without enabling clustering +* Clear the NATS client cache on the closed event +* Fix the check for attribute permissions so that an empty attribute permissions array is treated as a table-level permission definition +* Improve speed of cross-node health checks +* Fix for using `database` in describe operations diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.0.md new file mode 100644 index 00000000..f6aa2046 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.0.md @@ -0,0 +1,125 @@ +--- +title: 4.3.0 +sidebar_position: 59699 +--- + +# 4.3.0 + +#### HarperDB 4.3.0, Tucker Release + +3/19/2024 + +#### Relationships and Joins + +HarperDB now supports defining relationships between tables. These relationships can be defined as one-to-many, many-to-one, or many-to-many, and use a foreign key to record the relationship between records from different tables. An example of how to use this to define many-to-one and one-to-many relationships between a product and a brand table: + +```graphql +type Product @table { + id: ID @primaryKey + name: String @indexed + # foreign key used to reference a brand + brandId: ID @indexed + # many-to-one relationship to brand + brand: Brand @relation(from: "brandId") +} +type Brand @table { + id: ID @primaryKey + name: String @indexed + # one-to-many relationship of brand to products of that brand + products: [Product] @relation(to: "brandId") +} +``` + +This relationships model can be used in queries and selects, which will automatically "join" the data from the tables. For example, you could search for products by brand name: + +```http +/Product?brand.name=Microsoft +``` + +HarperDB also now supports querying with a sort order. Multiple sort orders can be provided to break ties. Nested selects have also been added, which also utilize joins when related records are referenced. For example: + +```http +/Product?brand.name=Microsoft&sort(price)&select(name,brand{name,size}) +``` + +See the [schema definition documentation](../../../../developers/applications/defining-schemas) for more information on defining relationships, and the [REST documentation for more information on queries](../../../../developers/rest). + +#### OpenAPI Specification + +A new default endpoint `GET /openapi` was added for describing endpoints configured through a GraphQL schema. + +#### Query Optimizations + +HarperDB has also made numerous improvements to query planning and execution for high-performance query results across a broader range of queries. + +#### Indexing Nulls + +New tables and indexes now support indexing null values, enabling queries by null (as well as queries for non-null values).
For example, you can query by nulls with the REST interface: + +```http +GET /Table/?attribute=null +``` + +Note that existing indexes will remain without null value indexing, and can only support indexing/querying by nulls if they are rebuilt (removed and re-added). + +#### CLI Expansion + +The HarperDB CLI now supports an expansive set of commands that execute operations from the operations API. For example, you can list users from the command line: + +```bash +harperdb list_users +``` + +#### BigInt Support + +HarperDB now supports `BigInt` attributes/values with integers (with full precision) up to 1000 bits (or 10^301). These can be used as primary keys or standard attributes, and can be used in queries or other operations. Within JSON documents, you can simply use standard JSON integer numbers with up to 300 digits, and large BigInt integers will be returned as standard JSON numbers. + +#### Local Studio Upgrade + +HarperDB has upgraded the local studio to match the same version that is offered at http://studio.harperdb.io. The local studio now has the full, robust feature set of the online version. + +### MQTT + +#### mTLS Support + +HarperDB now supports mTLS-based authentication for HTTP, WebSockets, and MQTT. See the [configuration documentation for more information](../../../deployments/configuration). + +#### Single-Level Wildcards + +HarperDB's MQTT service now supports single-level wildcards (`+`), which facilitates a greater range of subscriptions. + +#### Retain Handling + +HarperDB's MQTT service now supports the retain handling flags for subscriptions that are made using MQTT v5. + +#### CRDT + +HarperDB now supports basic conflict-free replicated data type (CRDT) updates that allow properties to be individually updated and merged when separate properties are updated on different threads or nodes. Individual property CRDT updates are automatically performed when you update individual properties through the Resource API, and are used when making `PATCH` requests through the REST API. + +The CRDT functionality also supports explicit incrementation to merge multiple parallel incrementation requests with proper summing. See the [Resource API for more information](../../reference/resource). + +#### Configuration Improvements + +The configuration has improved support for detecting port conflicts and handling paths for fastify routes, and now includes support for specifying a heap limit and TLS ciphers. See the [configuration documentation for more information](../../../deployments/configuration). + +#### Balanced Audit Log Cleanup + +Audit log cleanup has been improved to reduce resource consumption during scheduled cleanups. + +#### `export_*` support for `search_by_conditions` + +The `export_local` and `export_to_s3` operations now support `search_by_conditions` as one of the allowed search operators. + +### Storage Performance Improvements + +Significant improvements were made to the handling of free space to decrease free-space fragmentation and improve the performance of reusing free space for new data. This includes prioritizing reuse of recently released free space for better memory/caching utilization. + +#### Compact Database + +In addition to the storage improvements, HarperDB now includes functionality for [compacting a database](../../../deployments/harper-cli) (while offline), which can be used to eliminate all free space and reset any fragmentation. + +#### Compression + +Compression is now enabled by default for all records over 4KB.
+ +To learn more about how to configure compression, visit [configuration](https://docs.harperdb.io/docs/v/4.3/deployments/configuration). diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.1.md new file mode 100644 index 00000000..e583d175 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.1.md @@ -0,0 +1,11 @@ +--- +title: 4.3.1 +sidebar_position: 59698 +--- + +### HarperDB 4.3.1 +3/25/2024 + +* Fix Fastify warning about responseTime usage +* Add access to the MQTT topic in the context +* Fix for ensuring local NATS streams are created diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.10.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.10.md new file mode 100644 index 00000000..bd286e90 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.10.md @@ -0,0 +1,12 @@ +--- +title: 4.3.10 +sidebar_position: 59689 +--- + +### HarperDB 4.3.10 +5/5/2024 + +* Provide a `data` property on the request/context with deserialized data from the request body for any request, including methods that don't typically have a request body +* Ensure that CRDTs are not double-applied after committing a transaction +* Delete the MQTT will after publishing, even if it fails to publish +* Improve transaction retry logic to use async non-optimistic transactions after multiple retries \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.11.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.11.md new file mode 100644 index 00000000..df2cc2fb --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.11.md @@ -0,0 +1,10 @@ +--- +title: 4.3.11 +sidebar_position: 59688 +--- + +### HarperDB 4.3.11 +5/15/2024 + +* Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS +* Fix warning in Node v22 \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.12.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.12.md new file mode 100644 index 00000000..c4344da9 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.12.md @@ -0,0 +1,10 @@ +--- +title: 4.3.12 +sidebar_position: 59687 +--- + +### HarperDB 4.3.12 +5/16/2024 + +* Fix for handling ciphers in multiple certificates +* Allow each certificate config to have multiple hostnames \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.13.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.13.md new file mode 100644 index 00000000..7152f231 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.13.md @@ -0,0 +1,11 @@ +--- +title: 4.3.13 +sidebar_position: 59686 +--- + +### HarperDB 4.3.13 +5/22/2024 + +* Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available +* Fix for a memory leak when a node is down and consumers are trying to reconnect +* Faster cross-thread notification mechanism for transaction events \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.14.md
b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.14.md new file mode 100644 index 00000000..8374b138 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.14.md @@ -0,0 +1,9 @@ +--- +title: 4.3.14 +sidebar_position: 59685 +--- + +### HarperDB 4.3.14 +5/24/2024 + +* Fix application of ciphers to multi-certificate TLS configuration \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.15.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.15.md new file mode 100644 index 00000000..5bbb2304 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.15.md @@ -0,0 +1,10 @@ +--- +title: 4.3.15 +sidebar_position: 59684 +--- + +### HarperDB 4.3.15 +5/29/2024 + +* Add support for wildcards in hostnames for SNI +* Properly apply cipher settings on multiple TLS configurations \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.16.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.16.md new file mode 100644 index 00000000..b3b198d8 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.16.md @@ -0,0 +1,10 @@ +--- +title: 4.3.16 +sidebar_position: 59683 +--- + +### HarperDB 4.3.16 +6/3/2024 + +* Properly shim legacy TLS configuration with new multi-certificate support +* Show the changed filenames when an application is reloaded \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.17.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.17.md new file mode 100644 index 00000000..6cebb30b --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.17.md @@ -0,0 +1,14 @@ +--- +title: 4.3.17 +sidebar_position: 59682 +--- + +### HarperDB 4.3.17 +6/13/2024 + +* Add MQTT analytics of incoming messages and separate them by QoS level +* Ensure that any installed `harperdb` package in components is relinked to the running harperdb. +* Upgrade storage to more efficiently avoid storage increases +* Fix to improve database metrics in `system_information` +* Fix for pathing on Windows with extension modules +* Add ability to define a range of listening threads \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.18.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.18.md new file mode 100644 index 00000000..7de1ca2d --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.18.md @@ -0,0 +1,9 @@ +--- +title: 4.3.18 +sidebar_position: 59681 +--- + +### HarperDB 4.3.18 +6/18/2024 + +* Immediately terminate an MQTT connection when there is a keep-alive timeout.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.19.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.19.md new file mode 100644 index 00000000..ed2782da --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.19.md @@ -0,0 +1,11 @@ +--- +title: 4.3.19 +sidebar_position: 59680 +--- + +### HarperDB 4.3.19 +7/2/2024 + +* Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized. +* Ensure that deploying components empties the target directory for a clean installation and expansion of a `package` sub-directory. +* Ensure that we do not double-load components that are referenced by symlink from node_modules and in the components directory. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.2.md new file mode 100644 index 00000000..7a967e98 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.2.md @@ -0,0 +1,15 @@ +--- +title: 4.3.2 +sidebar_position: 59697 +--- + +### HarperDB 4.3.2 +3/29/2024 + +* Clone node updates to individually clone missing parts +* Fixes for publishing the OpenShift container +* Increase purge stream timeout +* Fixed declaration of the analytics schema so queries work before a restart +* Fix for iterating queries when deleted records exist +* LMDB stability upgrade +* Fix for cleanup of last will in MQTT \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.20.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.20.md new file mode 100644 index 00000000..68a18912 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.20.md @@ -0,0 +1,17 @@ +--- +title: 4.3.20 +sidebar_position: 59679 +--- + +### HarperDB 4.3.20 +7/11/2024 + +* The `restart_service` operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) +* Disable Nagle's algorithm for TCP connections to improve performance +* Append the Server-Timing header if a fastify route has already added one +* Avoid symlinking the harperdb directory to itself +* Fix for deleting an empty database +* Upgrade ws and pm2 packages for security vulnerabilities +* Improved TypeScript definitions for Resource and Context. +* The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from the source +* Better error reporting of MQTT parsing errors and termination of connections for compliance diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.21.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.21.md new file mode 100644 index 00000000..b8c22de5 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.21.md @@ -0,0 +1,13 @@ +--- +title: 4.3.21 +sidebar_position: 59678 +--- + +### HarperDB 4.3.21 +8/21/2024 + +* Fixed an issue with iterating/serializing query results with a `limit`. +* Fixed an issue that was preventing the caching of structured records in memory. +* Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context`.
+* Fixed logging warnings about license limits after a license is updated. +* Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.22.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.22.md new file mode 100644 index 00000000..92f1da33 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.22.md @@ -0,0 +1,14 @@ +--- +title: 4.3.22 +sidebar_position: 59677 +--- + +### HarperDB 4.3.22 +9/6/2024 + +* Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions +* Allow a `.extension` in URL paths to indicate both preferred encoding and decoding +* Added support for multi-part IDs in query parameters +* Limit describe calls by time before using statistical sampling +* Proper cleanup of a transaction when it is aborted due to running out of available read transactions +* Updates to release/builds \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.23.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.23.md new file mode 100644 index 00000000..8dd47c25 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.23.md @@ -0,0 +1,11 @@ +--- +title: 4.3.23 +sidebar_position: 59676 +--- + +### HarperDB 4.3.23 +9/12/2024 + +* Avoid long-running read transactions on subscription catch-ups +* Reverted the change to setting the default certificate for IP address only +* Better handling of last-will messages on startup \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.24.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.24.md new file mode 100644 index 00000000..ef4933ea --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.24.md @@ -0,0 +1,9 @@ +--- +title: 4.3.24 +sidebar_position: 59675 +--- + +### HarperDB 4.3.24 +9/12/2024 + +* Fix for querying for large strings (over 255 characters) \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.25.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.25.md new file mode 100644 index 00000000..387a2588 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.25.md @@ -0,0 +1,12 @@ +--- +title: 4.3.25 +sidebar_position: 59674 +--- + +### HarperDB 4.3.25 +9/24/2024 + +* Add analytics for replication latency +* Fix iteration issue over asynchronous joined queries +* Local studio fix for loading applications in an insecure context (HTTP) +* Local studio fix for loading the configuration tab \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.26.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.26.md new file mode 100644 index 00000000..d910120c --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.26.md @@ -0,0 +1,10 @@ +--- +title: 4.3.26 +sidebar_position: 59673 +--- + +### HarperDB 4.3.26 +9/27/2024 + +* Fixed a security issue that allowed users to bypass access controls with the operations API +* Previously, expiration
handling was limited to tables with a source, but now it can be applied to any table \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.27.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.27.md new file mode 100644 index 00000000..ca8352d3 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.27.md @@ -0,0 +1,13 @@ +--- +title: 4.3.27 +sidebar_position: 59672 +--- + +### HarperDB 4.3.27 +10/2/2024 + +* Fixed handling of HTTP upgrade with a Connection header that does not use Upgrade as the sole value (for Firefox) +* Added metrics for requests by status code +* Properly remove attributes from the stored metadata when they are removed from the GraphQL schema +* Fixed a regression in clustering retrieval of the schema description +* Fix attribute validation/handling to ensure that sequential IDs can be assigned with insert/upsert operations \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.28.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.28.md new file mode 100644 index 00000000..fdba3828 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.28.md @@ -0,0 +1,11 @@ +--- +title: 4.3.28 +sidebar_position: 59671 +--- + +### HarperDB 4.3.28 +10/3/2024 + +* Tolerate a user with no role when building the NATS config +* Change metrics for requests by status code to be prefixed with "response_" +* Log error `cause`, and other properties, when available. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.29.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.29.md new file mode 100644 index 00000000..c1f533fd --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.29.md @@ -0,0 +1,16 @@ +--- +title: 4.3.29 +sidebar_position: 59670 +--- + +### HarperDB 4.3.29 +10/7/2024 + +* Avoid unnecessary cookie session creation without explicit login +* Added support for caching directives in the operations API +* Fixed issue with creating metadata for a table with no primary key +* Local studio upgrade: + * Added support for "cache only" mode to view table data without origin resolution + * Added partial support for cookie-based authentication + * Added support for browsing tables with no primary key + * Improved performance for sorting tables diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.3.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.3.md new file mode 100644 index 00000000..52d7ebde --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.3.md @@ -0,0 +1,9 @@ +--- +title: 4.3.3 +sidebar_position: 59696 +--- + +### HarperDB 4.3.3 +4/01/2024 + +* Improve MQTT logging by properly logging auth failures and disconnections diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.30.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.30.md new file mode 100644 index 00000000..70c10852 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.30.md @@ -0,0 +1,9 @@ +--- +title: 4.3.30 +sidebar_position: 59669 +--- + +### HarperDB 4.3.30 +10/9/2024 + +* Properly assign transaction timestamp to writes from cache resolutions
(ensuring that latencies can be calculated on replicating nodes) diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.31.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.31.md new file mode 100644 index 00000000..097726ac --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.31.md @@ -0,0 +1,11 @@ +--- +title: 4.3.31 +sidebar_position: 59668 +--- + +### HarperDB 4.3.31 +10/10/2024 + +* Reset the restart limit for manual restarts to ensure that the NATS process will continue to restart after more than 10 manual restarts +* Only apply caching directives (from headers) to tables/resources that are configured to be caches (sourced from another resource) +* Catch/tolerate errors on serializing objects for logging diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.32.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.32.md new file mode 100644 index 00000000..ee5da648 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.32.md @@ -0,0 +1,11 @@ +--- +title: 4.3.32 +sidebar_position: 59667 +--- + +### HarperDB 4.3.32 +10/16/2024 + +* Fix a memory leak when `cluster_network` closes a hub connection +* Improved MQTT error handling, with less verbose logging of more common errors, treating a missing subscription as an invalid/missing topic +* Record analytics and the Server-Timing header even when cache resolution fails diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.33.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.33.md new file mode 100644 index 00000000..271373ef --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.33.md @@ -0,0 +1,9 @@ +--- +title: 4.3.33 +sidebar_position: 59666 +--- + +### HarperDB 4.3.33 +10/24/2024 + +* Change the default maximum length for a fastify route parameter from 100 to 1000 characters.
diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.34.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.34.md new file mode 100644 index 00000000..1071c273 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.34.md @@ -0,0 +1,9 @@ +--- +title: 4.3.34 +sidebar_position: 59665 +--- + +### HarperDB 4.3.34 +10/24/2024 + +* lmdb-js upgrade diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.35.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.35.md new file mode 100644 index 00000000..1811732b --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.35.md @@ -0,0 +1,10 @@ +--- +title: 4.3.35 +sidebar_position: 59664 +--- + +### HarperDB 4.3.35 +11/12/2024 + +* Upgrades for supporting Node.js v23 +* Fix for handling a change in the schema for nested data structures diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.36.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.36.md new file mode 100644 index 00000000..b2db5bd7 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.36.md @@ -0,0 +1,9 @@ +--- +title: 4.3.36 +sidebar_position: 59663 +--- + +### HarperDB 4.3.36 +11/14/2024 + +* lmdb-js upgrade for better free-space management diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.37.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.37.md new file mode 100644 index 00000000..57e23f5d --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.37.md @@ -0,0 +1,9 @@ +--- +title: 4.3.37 +sidebar_position: 59662 +--- + +### HarperDB 4.3.37 +12/6/2024 + +* lmdb-js upgrade for preventing crashes with shared user buffers diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.38.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.38.md new file mode 100644 index 00000000..640f3620 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.38.md @@ -0,0 +1,9 @@ +--- +title: 4.3.38 +sidebar_position: 59661 +--- + +### HarperDB 4.3.38 +1/10/2025 + +* Fixes for audit log cleanup diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.4.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.4.md new file mode 100644 index 00000000..f50f1bb6 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.4.md @@ -0,0 +1,10 @@ +--- +title: 4.3.4 +sidebar_position: 59695 +--- + +### HarperDB 4.3.4 +4/9/2024 + +* Fixed a buffer overrun issue with decompressing compressed data +* Better keep-alive of transactions with long-running queries \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.5.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.5.md new file mode 100644 index 00000000..40d030e5 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.5.md @@ -0,0 +1,9 @@ +--- +title: 4.3.5 +sidebar_position: 59694 +--- + +### HarperDB 4.3.5 +4/10/2024 + +* Fixed a buffer overrun issue with decompressing compressed data \ No newline
at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.6.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.6.md new file mode 100644 index 00000000..92b28286 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.6.md @@ -0,0 +1,13 @@ +--- +title: 4.3.6 +sidebar_position: 59693 +--- + +### HarperDB 4.3.6 +4/12/2024 + +* Fixed parsing of dates from epoch millisecond times in queries +* Fixed CRDT incrementation of different data types +* Adjustments to text/plain content type q-value handling +* Fixed parsing of passwords with a colon +* Added MQTT events for connections, authorization, and disconnections \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.7.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.7.md new file mode 100644 index 00000000..8f45995a --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.7.md @@ -0,0 +1,13 @@ +--- +title: 4.3.7 +sidebar_position: 59692 +--- + +### HarperDB 4.3.7 +4/16/2024 + +* Fixed transaction handling to stay open during long compaction operations +* Fixed handling of sorting on non-indexed attributes +* Storage stability improvements +* Fixed authentication/authorization of WebSocket connections and use of cookies +* Fixes for clone node operations \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.8.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.8.md new file mode 100644 index 00000000..cd0fe88e --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.8.md @@ -0,0 +1,13 @@ +--- +title: 4.3.8 +sidebar_position: 59691 +--- + +### HarperDB 4.3.8 +4/26/2024 + +* Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within the keep-alive window) +* Improved handling of write queue timeouts, with configurability +* Fixed a memory leak that can occur with NATS reconnections after heartbeat misses +* Fixed a bug in clone node with a null port +* Added error events to the MQTT events system \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.9.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.9.md new file mode 100644 index 00000000..dca6a92f --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.3.9.md @@ -0,0 +1,9 @@ +--- +title: 4.3.9 +sidebar_position: 59690 +--- + +### HarperDB 4.3.9 +4/30/2024 + +* lmdb-js upgrade \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.0.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.0.md new file mode 100644 index 00000000..f4e0da94 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.0.md @@ -0,0 +1,60 @@ +--- +title: 4.4.0 +sidebar_position: 59599 +--- + +# 4.4.0 + +#### HarperDB 4.4.0 + +10/14/2024 + +### Native Replication + +HarperDB has a completely [new native replication system](../../../developers/replication/), which is faster, more efficient, more secure, and more reliable than the previous replication system.
The new system (codenamed "Plexus") uses direct WebSocket connections between servers with highly optimized encoding and is driven by directly tracking the audit/transaction log for efficient and flexible data transfer. This replication has improved resilience, with the ability to reach consensus consistency through cross-node catch-up when a node goes down. Network connections can be performed over the existing operations API port or a separate port, for improved configurability. + +The native replication system is much easier to configure, with multiple options for authentication and security, including PKI/mTLS security that is highly robust and easy to use in conjunction with existing PKI certificates. Replication can be configured through explicit subscriptions or for automated replication of all data in a database. With automated replication, gossiping is used to automatically discover and connect to other nodes in the cluster. + +#### Sharding + +The new replication system also includes provisional support for [sharding](../../../developers/replication/sharding). This sharding mechanism paves the way for greater scalability and performance, by allowing data to be distributed across multiple nodes. + +#### Replicated Operations + +Certain operations can now be replicated across the cluster, including the deployment and management of components. This allows for a more seamless experience when managing a cluster of HarperDB instances. Restarts can also be "replicated", and if used, will perform a rolling restart of all the nodes in a cluster. + +### Computed Properties + +Computed properties allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value. For example, you could have a computed property for a full name based on first and last, or age/duration based on a date. Computed properties are also foundational for custom indexes. See the [schema documentation](../../../../developers/applications/defining-schemas), [Resource API](../../reference/resource), and our blog post on [computed properties](https://www.harperdb.io/development/tutorials/how-to-create-custom-indexes-with-computed-properties) for more information. + +### Custom Indexing + +Custom indexes can now be defined using computed properties to allow for unlimited possibilities of indexing, including composite, full-text, and vector indexing. Again, see the [schema documentation](../../../../developers/applications/defining-schemas) for more information. + +### Native Graph Support + +HarperDB now includes provisional support for native [GraphQL querying functionality](../../reference/graphql). This allows for querying of graph data using GraphQL syntax. This is provisional and some APIs may be updated in the future. + +### Dynamic Certificate Management + +Certificates are now stored in system tables and can be dynamically managed. Certificates can be added, replaced, and deleted without restarting HarperDB. This includes both standard certificates and certificate authorities, as well as private keys (private keys are not stored in a table; they are securely stored in a file). A sketch of managing certificates through the operations API is shown after the startup status note below. + +#### Status Report on Startup + +On startup, HarperDB will now print out an informative status of all running services and the ports they are listening on.
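+Because certificates are table-backed, they can be inspected and updated at runtime through the operations API. The following is a minimal sketch, assuming the certificate-management operations (`list_certificates`, `add_certificate`), the default operations port 9925, and illustrative credentials and field names; check the operations API reference for the exact shapes:
+
+```bash
+# List the certificates currently stored in the system tables
+curl -sk https://localhost:9925 \
+  -u admin:password \
+  -H "Content-Type: application/json" \
+  -d '{"operation": "list_certificates"}'
+
+# Add or replace a certificate without a restart; "my-cert" and the
+# PEM placeholder are illustrative values, not from this release note
+curl -sk https://localhost:9925 \
+  -u admin:password \
+  -H "Content-Type: application/json" \
+  -d '{"operation": "add_certificate", "name": "my-cert", "certificate": "<PEM contents>"}'
+```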
+ +#### Support for Response object + +Resource methods can now return a `Response` object (or an object with `headers` and `status`) to allow for more control over the response. + +### Auto-incrementing Primary Keys + +Primary keys can now be auto-incrementing, allowing for automatic generation of numeric primary keys on insert/creation. Primary keys defined with `ID` or `String` will continue to use GUIDs for auto-assigned primary keys, which occurs on insert or creation if the primary key is not provided. However, for keys that are defined as `Any`, `Int`, or `Long`, the primary key will be assigned using auto-incrementation. This is significantly more efficient than GUIDs since the key only requires 8 bytes of storage instead of 31 bytes, and doesn't require random number generation. + +#### Developer/Production Mode for Configuration + +When using interactive installation (when configuration is not provided through arguments or env vars), HarperDB now provides a choice of developer or production mode, with a set of default configuration values for each mode better suited to development or production environments. + +#### Export by Protocol + +Exported resources can be configured to be specifically exported by protocol (REST, MQTT, etc.) for more granular control over what is exported where. diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.1.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.1.md new file mode 100644 index 00000000..80fac940 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.1.md @@ -0,0 +1,12 @@ +--- +title: 4.4.1 +sidebar_position: 59598 +--- + +### HarperDB 4.4.1 +10/17/2024 + +* Fix issue where non-RSA keys were not being parsed correctly on startup.
+* Fix a memory leak when cluster_network closes a hub connection +* Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic +* Record analytics and server-timing header even when cache resolution fails \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.10.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.10.md new file mode 100644 index 00000000..328a694a --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.10.md @@ -0,0 +1,9 @@ +--- +title: 4.4.10 +sidebar_position: 59589 +--- + +### HarperDB 4.4.10 +12/17/2024 + +* Fix for deploying packages and detecting node_modules directory \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.11.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.11.md new file mode 100644 index 00000000..6f5d7215 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.11.md @@ -0,0 +1,10 @@ +--- +title: 4.4.11 +sidebar_position: 59588 +--- + +### HarperDB 4.4.11 +12/18/2024 + +* Fix for initial certificate creation on upgrade +* Docker build fix \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.12.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.12.md new file mode 100644 index 00000000..82c09692 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.12.md @@ -0,0 +1,10 @@ +--- +title: 4.4.12 +sidebar_position: 59587 +--- + +### HarperDB 4.4.12 +12/19/2024 + +* Move components installed by reference into hdb/components for consistency and compatibility with Next.js +* Use `npm install --force` to ensure modules are installed \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.13.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.13.md new file mode 100644 index 00000000..681fc21d --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.13.md @@ -0,0 +1,15 @@ +--- +title: 4.4.13 +sidebar_position: 59586 +--- + +### HarperDB 4.4.13 +1/2/2025 + +* Fix for not using requestCert if the port doesn't need replication +* Fix for applying timeouts to the HTTP server for older Node versions +* Updates for different replication configuration settings, including sharding and replication using stored credentials +* Mitigation for crashing due to GC'ed shared array buffers +* Fix for error handling with CLI failures +* Updated dependencies +* Fix to allow securePort to be set on authentication \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.14.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.14.md new file mode 100644 index 00000000..48103afe --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.14.md @@ -0,0 +1,12 @@ +--- +title: 4.4.14 +sidebar_position: 59585 +--- + +### HarperDB 4.4.14 +1/3/2025 + +* Fix for starting HTTP server if headersTimeout is omitted in the configuration +* Fix for avoiding ping timeouts for large/long-duration WS messages between nodes +* Don't report errors for a component
that only uses a directory +* Add flag for disabling WebSocket on REST component \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.15.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.15.md new file mode 100644 index 00000000..ec4ac263 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.15.md @@ -0,0 +1,11 @@ +--- +title: 4.4.15 +sidebar_position: 59584 +--- + +### HarperDB 4.4.15 +1/8/2025 + +* Fix for managing the state of replication sequences per node +* Fix for better concurrency with ongoing replication +* Fix for accessing audit log entries \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.16.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.16.md new file mode 100644 index 00000000..3e90a9b1 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.16.md @@ -0,0 +1,15 @@ +--- +title: 4.4.16 +sidebar_position: 59583 +--- + +### HarperDB 4.4.16 +1/22/2025 + +* Fix for cleaning up old audit entries and associated deletion entries +* Allow CLI operations to be run when cloning is enabled +* Report table size in describe operations +* Fix for cleaning up symlinks when dropping components +* Fix for enumerating components when symlinks are used +* Add an option for using a specific installation command with deploys +* Add an API for registering an HTTP upgrade listener with `server.upgrade` \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.17.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.17.md new file mode 100644 index 00000000..788b9810 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.17.md @@ -0,0 +1,12 @@ +--- +title: 4.4.17 +sidebar_position: 59582 +--- + +### HarperDB 4.4.17 +1/29/2025 + +* Provide statistics on the size of the audit log store +* Fix handling of symlinks to the HarperDB package to avoid NPM errors in restricted containers +* Add option for rolling/consecutive restarts for deployments +* Fix for enabling root CAs for replication authorization \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.18.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.18.md new file mode 100644 index 00000000..cf341732 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.18.md @@ -0,0 +1,11 @@ +--- +title: 4.4.18 +sidebar_position: 59581 +--- + +### HarperDB 4.4.18 +1/29/2025 + +* Add option for disabling full table copy in replication +* Add option for startTime in route configuration +* Add/fix option to deploy with package from CLI \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.19.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.19.md new file mode 100644 index 00000000..53d42bb8 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.19.md @@ -0,0 +1,12 @@ +--- +title: 4.4.19 +sidebar_position: 59580 +--- + +### HarperDB 4.4.19 +2/4/2025 + +* LMDB upgrade for free-list verification on commit +* Add check to avoid compacting a database multiple times with
compactOnStart +* Fix handling of denied/absent subscription +* Add support for including symlinked directories in packaging a deployed component \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.2.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.2.md new file mode 100644 index 00000000..6137d48a --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.2.md @@ -0,0 +1,9 @@ +--- +title: 4.4.2 +sidebar_position: 59597 +--- + +### HarperDB 4.4.2 +10/18/2024 + +* Republish of 4.4.1 with Git merge correction. \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.20.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.20.md new file mode 100644 index 00000000..845129ca --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.20.md @@ -0,0 +1,9 @@ +--- +title: 4.4.20 +sidebar_position: 59579 +--- + +### HarperDB 4.4.20 +2/11/2025 + +* LMDB upgrade for improved handling of page boundaries with free-space lists diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.21.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.21.md new file mode 100644 index 00000000..74d653bc --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.21.md @@ -0,0 +1,11 @@ +--- +title: 4.4.21 +sidebar_position: 59578 +--- + +### HarperDB 4.4.21 +2/25/2025 + +* Fix for saving audit log entries for large keys (> 1KB) +* Security fix for handling missing passwords +* Skip bin links for NPM installation to avoid access issues \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.22.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.22.md new file mode 100644 index 00000000..85ae1895 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.22.md @@ -0,0 +1,9 @@ +--- +title: 4.4.22 +sidebar_position: 59577 +--- + +### HarperDB 4.4.22 +3/5/2025 + +* Add new http configuration option `corsAccessControlAllowHeaders` \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.23.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.23.md new file mode 100644 index 00000000..42e37e0c --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.23.md @@ -0,0 +1,10 @@ +--- +title: 4.4.23 +sidebar_position: 59576 +--- + +### HarperDB 4.4.23 +3/7/2025 + +* Fix for subscriptions to children of segmented id +* Fix for better error reporting on NPM failures \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.24.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.24.md new file mode 100644 index 00000000..dbdf7972 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.24.md @@ -0,0 +1,10 @@ +--- +title: 4.4.24 +sidebar_position: 59575 +--- + +### HarperDB 4.4.24 +3/10/2025 + +* Use process.exit(0) to restart when enabled by env var +* Reset the cwd on thread restart \ No newline at end of file diff --git 
a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.25.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.25.md new file mode 100644 index 00000000..eaf15be1 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.25.md @@ -0,0 +1,9 @@ +--- +title: 4.4.25 +sidebar_position: 59574 +--- + +### HarperDB 4.4.25 +4/3/2025 + +* Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.26.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.26.md new file mode 100644 index 00000000..022c8ef1 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.26.md @@ -0,0 +1,11 @@ +--- +title: 4.4.26 +sidebar_position: 59573 +--- + +### Harper 4.4.26 +5/12/2025 + +* Fix replication of messages with Plexus +* Rebrand HarperDB as Harper +* Add support for enabling heapSnapshotNearLimit via `--heapsnapshot-near-heap-limit` param / `THREADS_HEAPSNAPSHOTNEARLIMIT` env var diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.3.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.3.md new file mode 100644 index 00000000..e91428c4 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.3.md @@ -0,0 +1,13 @@ +--- +title: 4.4.3 +sidebar_position: 59596 +--- + +### HarperDB 4.4.3 +10/25/2024 + +* Fix for notification of records through classes that override get for multi-tier caching +* Fix for CLI operations +* Support for longer route parameters in Fastify routes +* Fix for accessing `harperdb` package/module from user threads +* Improvements to clone node for cloning without credentials \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.4.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.4.md new file mode 100644 index 00000000..8e6a0c48 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.4.md @@ -0,0 +1,11 @@ +--- +title: 4.4.4 +sidebar_position: 59595 +--- + +### HarperDB 4.4.4 +11/4/2024 + +* Re-introduce declarative roles and permissions +* Fix for OpenAPI endpoint +* Fix for exports of `harperdb` package/module \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.5.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.5.md new file mode 100644 index 00000000..f075ea02 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.5.md @@ -0,0 +1,15 @@ +--- +title: 4.4.5 +sidebar_position: 59594 +--- + +### HarperDB 4.4.5 +11/15/2024 + +* Fix for DOS vulnerability in large headers with cache-control and replication headers +* Fix for handling a change in the schema type for sub-fields in a nested object +* Add support for content type handlers to return iterators +* Fix for session management with custom authentication handler +* Updates for Node.js V23 compatibility +* Fix for sorting on nested properties +* Fix for querying on not_equal to a null with object values \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.6.md 
b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.6.md new file mode 100644 index 00000000..2d4b17b6 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.6.md @@ -0,0 +1,12 @@ +--- +title: 4.4.6 +sidebar_position: 59593 +--- + +### HarperDB 4.4.6 +11/25/2024 + +* Fix queries with only sorting applied +* Fix for handling invalidation events propagating through sources +* Expanded CLI support for deploying packages +* Support for deploying large packages \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.7.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.7.md new file mode 100644 index 00000000..e1723090 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.7.md @@ -0,0 +1,10 @@ +--- +title: 4.4.7 +sidebar_position: 59592 +--- + +### HarperDB 4.4.7 +11/27/2024 + +* Allow a package to deploy its own modules +* Fix for preventing double sourcing of resources \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.8.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.8.md new file mode 100644 index 00000000..3bb02964 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.8.md @@ -0,0 +1,9 @@ +--- +title: 4.4.8 +sidebar_position: 59591 +--- + +### HarperDB 4.4.8 +12/2/2024 + +* Add multiple Node versions of published Docker containers \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.9.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.9.md new file mode 100644 index 00000000..fa576ba9 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/4.4.9.md @@ -0,0 +1,13 @@ +--- +title: 4.4.9 +sidebar_position: 59590 +--- + +### HarperDB 4.4.9 +12/12/2024 + +* Change enableRootCAs to default to true +* Fixes for install and clone commands +* Add rejectUnauthorized to the CLI options +* Fixes for cloning +* Install modules in own component when deploying package by payload \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/_category_.json b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/_category_.json new file mode 100644 index 00000000..9a7bca50 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Tucker (Version 4)", + "position": -4 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/index.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/index.md new file mode 100644 index 00000000..67dde9b5 --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/index.md @@ -0,0 +1,39 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +HarperDB version 4 ([Tucker release](./tucker)) represents a major step forward in database technology.
This release line has ground-breaking architectural advancements including: + +## [4.4](./4.4.0) + +* Native replication (codenamed "Plexus"), which is faster, more efficient, more secure, and more reliable than the previous replication system and provides provisional sharding capabilities with a foundation for the future +* Computed properties that allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value +* Custom indexing, including composite, full-text indexing, and vector indexing + +## [4.3](./4.3.0) + +* Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations +* More advanced transaction support for CRDTs and storage of large integers (with BigInt) +* Better management with a new upgraded local studio and new CLI features + +## [4.2](./4.2.0) + +* New component architecture and Resource API for advanced, robust custom database application development +* Real-time capabilities through MQTT, WebSockets, and Server-Sent Events +* REST interface for intuitive, fast, and standards-compliant HTTP interaction +* Native caching capabilities for high-performance cache scenarios +* Clone node functionality + +## [4.1](./4.1.0) + +* New streaming iterators mechanism that allows query results to be delivered to clients _while_ querying results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery +* New thread-based concurrency model for more efficient resource usage + +## [4.0](./4.0.0) + +* New clustering technology that delivers robust, resilient, and high-performance replication +* Major storage improvements with highly-efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities + +Did you know our release names are dedicated to employee pups? For our fourth release, [meet Tucker!](./tucker) diff --git a/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/tucker.md b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/tucker.md new file mode 100644 index 00000000..db4b3e0f --- /dev/null +++ b/site/versioned_docs/version-4.4/technical-details/release-notes/v4-tucker/tucker.md @@ -0,0 +1,11 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. + +![picture of grey and white dog](/img/v4.4/dogs/tucker.png) + +_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at Harper. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard.
I have very ticklish feet and love belly rubs!_ diff --git a/site/versioned_docs/version-4.5/administration/_category_.json b/site/versioned_docs/version-4.5/administration/_category_.json new file mode 100644 index 00000000..828e0998 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Administration", + "position": 2, + "link": { + "type": "generated-index", + "title": "Administration Documentation", + "description": "Guides for managing and administering HarperDB instances", + "keywords": [ + "administration" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/administration.md b/site/versioned_docs/version-4.5/administration/administration.md new file mode 100644 index 00000000..2952d8c1 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/administration.md @@ -0,0 +1,31 @@ +--- +title: Best Practices and Recommendations +--- + +# Best Practices and Recommendations + +Harper is designed for minimal administrative effort, and with managed services these tasks are handled for you. But there are important things to consider when managing your own Harper servers. + +### Data Protection and (Backup and) Recovery + +In a distributed database, data protection and recovery can benefit from different strategies than in a traditional single-server database. Multiple aspects of data protection and recovery should be considered: + +* Availability: As a distributed database, Harper is intrinsically built for high availability, and a cluster will continue to run even when entire servers fail. This is the first and primary defense for protecting against any downtime or data loss. Harper provides fast horizontal scaling functionality with node cloning, which makes it easy to establish high-availability clusters. +* [Audit log](./logging/audit-logging): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. +* Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss). + +### Horizontal Scaling with Node Cloning + +Harper provides rapid horizontal scaling capabilities through [node cloning functionality described here](./cloning). + +### Monitoring + +Harper provides robust capabilities for analytics and observability to facilitate effective and informative monitoring: +* Analytics provides statistics on usage, request counts, load, and memory usage, with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics).
+* A large variety of real-time statistics about load, system information, database metrics, and thread usage can be retrieved through the [`system_information` API](../developers/operations-api/utilities). +* Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering). +* Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy to visualize and monitor Harper with Grafana. + +### Replication Transaction Logging + +Harper utilizes NATS for replication, which maintains a transaction log. See the [transaction log documentation for information on how to query this log](./logging/transaction-logging). diff --git a/site/versioned_docs/version-4.5/administration/cloning.md b/site/versioned_docs/version-4.5/administration/cloning.md new file mode 100644 index 00000000..1550814f --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/cloning.md @@ -0,0 +1,150 @@ +--- +title: Clone Node +--- + +# Clone Node + +Clone node is a configurable node script that, when pointed to another instance of Harper, will create a clone of that +instance's config and databases, and set up full replication. If it is run in a location where there is no existing Harper install, +it will, along with cloning, install Harper. If it is run in a location where there is another Harper instance, it will +only clone config, databases and replication that do not already exist. + +Clone node is triggered when Harper is installed or started with certain environment or command line (CLI) variables set (see below). + +**Leader node** - the instance of Harper you are cloning.\ +**Clone node** - the new node which will be a clone of the leader node. + +To start a clone, run `harperdb` in the CLI with either of the following variables set: + +#### Environment variables + +* `HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +* `HDB_LEADER_USERNAME` - The leader node admin username. +* `HDB_LEADER_PASSWORD` - The leader node admin password. +* `REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: +``` +HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-2.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=... harperdb +``` + +#### Command line variables + +* `--HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +* `--HDB_LEADER_USERNAME` - The leader node admin username. +* `--HDB_LEADER_PASSWORD` - The leader node admin password. +* `--REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: +``` +harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --REPLICATION_HOSTNAME node-2.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ... +``` + +Each time clone is run it will set a value `cloned: true` in `harperdb-config.yaml`. This value will prevent clone from +running again. If you want to run clone again, set this value to `false`. If Harper is started with the clone variables +still present and `cloned` is true, Harper will just start as normal.
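+As an illustration of the `cloned` flag described above, the following is a hedged sketch of re-running clone against the same leader; the `sed` edit and the `~/hdb` root path are assumptions for a default install:
+
+```bash
+# Allow clone to run again by flipping the flag in harperdb-config.yaml
+sed -i 's/^cloned: true/cloned: false/' ~/hdb/harperdb-config.yaml
+
+# Start Harper with the clone variables still set; clone will run again
+HDB_LEADER_URL=https://node-1.my-domain.com:9925 \
+HDB_LEADER_USERNAME=admin \
+HDB_LEADER_PASSWORD=password \
+REPLICATION_HOSTNAME=node-2.my-domain.com \
+harperdb
+```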
+ +Clone node does not require any additional configuration apart from the variables referenced above. +However, if you wish to set any configuration during clone, this can be done by passing the config as environment/CLI +variables or cloning overtop of an existing `harperdb-config.yaml` file. + +More can be found in the Harper config documentation [here](../deployments/configuration). + +### Excluding databases and components + +To set any specific (optional) clone config, including the exclusion of any databases, tables, or components, there is a file +called `clone-node-config.yaml` that can be used. + +The file must be located in the `ROOTPATH` directory of your clone (the `hdb` directory where your clone will be installed. +If the directory does not exist, create one and add the file to it). + +The config available in `clone-node-config.yaml` is: + +```yaml +databaseConfig: + excludeDatabases: + - database: null + excludeTables: + - database: null + table: null +componentConfig: + exclude: + - name: null +``` + +_Note: only include the configuration that you are using. If no clone config file is provided, nothing will be excluded, +unless it already exists on the clone._ + +`databaseConfig` - Set any databases or tables that you wish to exclude from cloning. + +`componentConfig` - Set any components that you do not want cloned. Clone node will not clone the component code; +it will only clone the component reference that exists in the leader harperdb-config file. + +### Cloning configuration + +Clone node will not clone any configuration that is classed as unique to the leader node. This includes `replication.hostname`, `replication.url`, `clustering.nodeName`, +`rootPath`, and any other path-related values, for example `storage.path`, `logging.root`, `componentsRoot`, +any authentication certificate/key paths. + +### Cloning the system database + +Harper uses a database called `system` to store operational information. Clone node will only clone the user and role +tables from this database. It will also set up replication on these tables, which means that any existing and future users and roles +that are added will be replicated throughout the cluster. + +Cloning the user and role tables means that once clone node is complete, the clone will share the same login credentials with +the leader. + +### Replication + +If clone is run with the `REPLICATION_HOSTNAME` variable set, a fully replicating clone will be created. + +If any databases are excluded from the clone, replication will not be set up on these databases. + +### JWT Keys + +If cloning with replication, the leader's JWT private and public keys will be cloned. To disable this, include `CLONE_KEYS=false` in your clone variables. + +### Cloning overtop of an existing Harper instance + +Clone node will not overwrite any existing config, database, or replication. It will write/clone any config, database, or replication +that does not exist on the node it is running on. + +An example of how this can be useful is if you want to set Harper config before the clone is created. To do this, you +would create a harperdb-config.yaml file in your local `hdb` root directory with the config you wish to set. Then, +when clone is run, it will append the missing config to the file and install Harper with the desired config. + +Another useful example could be retroactively adding another database to an existing instance.
Running clone on +an existing instance could create a full clone of another database and set up replication between the database on the +leader and the clone. + +### Cloning steps + +Clone node will execute the following steps when run: +1. Look for an existing Harper install. It does this by using the default (or user provided) `ROOTPATH`. +1. If an existing instance is found, it will check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true`, clone will skip the clone logic and start Harper. +1. Clone harperdb-config.yaml values that don't already exist (excluding values unique to the leader node). +1. Fully clone any databases that don't already exist. +1. If classed as a "fresh clone", install Harper. An instance is classed as a fresh clone if there is no system database. +1. If `REPLICATION_HOSTNAME` is set, set up replication between the leader and clone. +1. Clone is complete; start Harper. + +### Cloning with Docker + +To run clone inside a container, add the environment variables to your run command. + +For example: + +``` +docker run -d \ + -v <host-path>:/home/harperdb/hdb \ + -e HDB_LEADER_PASSWORD=password \ + -e HDB_LEADER_USERNAME=admin \ + -e HDB_LEADER_URL=https://1.123.45.6:9925 \ + -e REPLICATION_HOSTNAME=1.123.45.6 \ + -p 9925:9925 \ + -p 9926:9926 \ + harperdb/harperdb +``` + +Clone will only run once, when you first start the container. If the container restarts, the environment variables will be ignored. diff --git a/site/versioned_docs/version-4.5/administration/compact.md b/site/versioned_docs/version-4.5/administration/compact.md new file mode 100644 index 00000000..1a71db14 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/compact.md @@ -0,0 +1,60 @@ +--- +title: Compact +--- + +# Compact + +Database files can grow quickly as you use them, sometimes impeding performance. Harper has multiple compact features that can be used to reduce database file size and potentially improve performance. The compact process does not compress your data; it instead makes your database file smaller by eliminating free space and fragmentation. + +There are two options that Harper offers for compacting a database. + +_Note: Some of the storage configuration (such as compression) cannot be updated on existing databases; this is where the following options are useful. They will create a new compressed copy of the database with any updated configuration._ + +More information on the storage configuration options can be [found here](../deployments/configuration#storage). + +### Copy compaction + +It is recommended that, to prevent any record loss, Harper not be running when performing this operation. + +This will copy a Harper database with compaction. If you wish to use this new database in place of the original, you will need to move/rename it to the path of the original database. + +This command should be run in the [CLI](../deployments/harper-cli): + +```bash +harperdb copy-db <database> <destination-path> +``` + +For example, to copy the default database: + +```bash +harperdb copy-db data /home/user/hdb/database/copy.mdb +``` + +### Compact on start + +Compact on start is a more automated option that will compact **all** databases when Harper is started. Harper will not start until compact is complete. Under the hood, it loops through all non-system databases, creates a backup of each one, and calls copy-db. After the copy/compaction is complete it will move the new database to where the original one is located and remove any backups.
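+For a single database, the manual equivalent of what compact on start automates looks roughly like the sketch below; it assumes Harper is stopped and that the default `data` database lives at `/home/user/hdb/database/data.mdb`, mirroring the `copy-db` example above:
+
+```bash
+# Compact the default "data" database into a new file
+harperdb copy-db data /home/user/hdb/database/compacted.mdb
+
+# Keep the original as a backup, then swap the compacted copy into place
+mv /home/user/hdb/database/data.mdb /home/user/hdb/database/data.mdb.bak
+mv /home/user/hdb/database/compacted.mdb /home/user/hdb/database/data.mdb
+```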
+ +Compact on start is initiated by config in `harperdb-config.yaml`. + +_Note: Compact on start will switch `compactOnStart` to `false` after it has run._ + +`compactOnStart` - _Type_: boolean; _Default_: false + +`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false + +```yaml +storage: + compactOnStart: true + compactOnStartKeepBackup: false +``` + +Using CLI variables: + +```bash +--STORAGE_COMPACTONSTART true --STORAGE_COMPACTONSTARTKEEPBACKUP true +``` + +Or using environment variables: + +```bash +STORAGE_COMPACTONSTART=true +STORAGE_COMPACTONSTARTKEEPBACKUP=true +``` diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/create-account.md b/site/versioned_docs/version-4.5/administration/harper-studio/create-account.md new file mode 100644 index 00000000..fdc23cfb --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/create-account.md @@ -0,0 +1,26 @@ +--- +title: Create a Studio Account +--- + +# Create a Studio Account +Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up). + +1) Provide the following information: + * First Name + * Last Name + * Email Address + * Subdomain + + *Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1”, the instance URL would be: https://c1-demo.harperdbcloud.com.* + * Coupon Code (optional) +2) Review the Privacy Policy and Terms of Service. +3) Click the **sign up for free** button. +4) You will be taken to a new screen to add an account password. Enter your password. + *Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* +5) Click the **add account password** button. + +You will receive a Studio welcome email confirming your registration. + + + +Note: Your email address will be used as your username and cannot be changed. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/enable-mixed-content.md b/site/versioned_docs/version-4.5/administration/harper-studio/enable-mixed-content.md new file mode 100644 index 00000000..85b7f8a7 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/enable-mixed-content.md @@ -0,0 +1,11 @@ +--- +title: Enable Mixed Content +--- + +# Enable Mixed Content + +Enabling mixed content is required in cases where you would like to connect the Harper Studio to Harper Instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio. + + + +A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html). \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/index.md b/site/versioned_docs/version-4.5/administration/harper-studio/index.md new file mode 100644 index 00000000..6db20847 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/index.md @@ -0,0 +1,17 @@ +--- +title: Harper Studio +--- + +# Harper Studio +Harper Studio is the web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface without any knowledge of the underlying Harper API. It’s free to sign up; get started today!
+ +[Sign up for free!](https://studio.harperdb.io/sign-up) + +Harper now includes a simplified local Studio that is packaged with all Harper installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io). + +--- +## How does Studio Work? +While Harper Studio is web-based and hosted by us, all database interactions are performed on the Harper instance the studio is connected to. The Harper Studio loads in your browser, at which point you log in to your Harper instances. Credentials are stored in your browser cache and are not transmitted back to Harper. All database interactions are made via the Harper Operations API directly from your browser to your instance. + +## What type of instances can I manage? +Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI. All Harper instances feature identical behavior whether they are hosted by us or by you. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/instance-configuration.md b/site/versioned_docs/version-4.5/administration/harper-studio/instance-configuration.md new file mode 100644 index 00000000..fe0d295f --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/instance-configuration.md @@ -0,0 +1,125 @@ +--- +title: Instance Configuration +--- + +# Instance Configuration + +Harper instance configuration can be viewed and managed directly through the Harper Studio. Harper Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. Enterprise instances can have their licenses modified by adjusting licensed RAM. + + + +All instance configuration is handled through the **config** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **config** in the instance control bar. + +*Note, the **config** page will only be available to super users and certain items are restricted to Studio organization owners.* + +## Instance Overview + +The **instance overview** panel displays the following instance specifications: + +* Instance URL + +* Applications URL + +* Instance Node Name (for clustering) + +* Instance API Auth Header (this user) + + *The Basic authentication header used for the logged-in Harper database user* + +* Created Date (Harper Cloud only) + +* Region (Harper Cloud only) + + *The geographic region where the instance is hosted.* + +* Total Price + +* RAM + +* Storage (Harper Cloud only) + +* Disk IOPS (Harper Cloud only) + +## Update Instance RAM + +Harper Cloud instance size and Enterprise instance licenses can be modified with the following instructions. This option is only available to Studio organization owners. + + + +Note: For Harper Cloud instances, upgrading RAM may add additional CPUs to your instance as well. Click here to see how many CPUs are provisioned for each instance size. + +1) In the **update ram** panel at the bottom left: + + * Select the new instance size. + + * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear.
Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + + * If you do have a credit card associated, you will be presented with the updated billing information. + + * Click **Upgrade**. + +2) The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +*Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* + +## Update Instance Storage + +The Harper Cloud instance storage size can be increased with the following instructions. This option is only available to Studio organization owners. + +Note: Instance storage can only be upgraded once every 6 hours. + +1) In the **update storage** panel at the bottom left: + + * Select the new instance storage size. + + * If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + + * If you do have a credit card associated, you will be presented with the updated billing information. + + * Click **Upgrade**. + +2) The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +3) Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +*Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new.* + +## Remove Instance + +The Harper instance can be deleted/removed from the Studio with the following instructions. Once this operation is started, it cannot be undone. This option is only available to Studio organization owners. + +1) In the **remove instance** panel at the bottom left: + * Enter the instance name in the text box. + + * The Studio will present you with a warning. + + * Click **Remove**. + +2) The instance will begin deleting immediately. + +## Restart Instance + +The Harper Cloud instance can be restarted with the following instructions. + +1) In the **restart instance** panel at the bottom right: + * Enter the instance name in the text box. + + * The Studio will present you with a warning. + + * Click **Restart**. + +2) The instance will begin restarting immediately. + +## Instance Config (Read Only) + +A JSON preview of the instance config is available for reference at the bottom of the page. This is a read-only view and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration#using-the-configuration-file-and-naming-conventions).
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/instance-metrics.md b/site/versioned_docs/version-4.5/administration/harper-studio/instance-metrics.md new file mode 100644 index 00000000..eae954f1 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/instance-metrics.md @@ -0,0 +1,16 @@ +--- +title: Instance Metrics +--- + +# Instance Metrics + +The Harper Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **status** in the instance control bar. + +Once on the instance status page, you can view host system information, [Harper logs](../logging/standard-logging), and Harper Cloud alarms (if it is a cloud instance). + +_Note, the **status** page will only be available to super users._ diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/instances.md b/site/versioned_docs/version-4.5/administration/harper-studio/instances.md new file mode 100644 index 00000000..f44fb609 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/instances.md @@ -0,0 +1,130 @@ +--- +title: Instances +--- + +# Instances + +The Harper Studio allows you to administer all of your Harper instances in one place. Harper currently offers the following instance types: + +* **Harper Cloud Instance** Managed installations of Harper, what we call [Harper Cloud](../../deployments/harper-cloud/). +* **5G Wavelength Instance** Managed installations of Harper running on the Verizon network through AWS Wavelength, what we call 5G Wavelength Instances. _Note, these instances are only accessible via the Verizon network._ +* **Enterprise Instance** Any Harper installation that is managed by you. These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally. + +All interactions between the Studio and your instances take place directly from your browser. Harper stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the Harper instances using the standard [Harper API](../../developers/operations-api/). + +## Organization Instance List + +A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets its own card. Harper Cloud and Enterprise instances are listed together. + +## Create a New Instance + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select your desired Instance Type. +1. For a Harper Cloud Instance or a Harper 5G Wavelength Instance, click **Create Harper Cloud Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1”, the instance URL would be: https://c1-demo.harperdbcloud.com.
The Instance URL will be previewed below._ + 1. Enter Instance Username + + _This is the username of the initial Harper instance super user._ + 1. Enter Instance Password + + _This is the password of the initial Harper instance super user._ + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _Harper Cloud Instances are billed based on Instance RAM; this will select the size of your provisioned instance._ _More on instance specs._ + 1. Select Storage Size + + _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS._ _More on IOPS Impact on Performance._ + 1. Select Instance Region + + _The geographic area where your instance will be provisioned._ + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. Your Harper Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. + +## Register Enterprise Instance + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select **Register Enterprise Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This is used for descriptive purposes only._ + 1. Enter Instance Username + + _The username of a Harper super user that is already configured in your Harper installation._ + 1. Enter Instance Password + + _The password of a Harper super user that is already configured in your Harper installation._ + 1. Enter Host + + _The host to access the Harper instance. For example, `harperdb.myhost.com` or `localhost`._ + 1. Enter Port + + _The port to access the Harper instance. Harper defaults to `9925` for HTTP and `31283` for HTTPS._ + 1. Select SSL + + _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message._ + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _Harper instances are billed based on Instance RAM. Selecting additional RAM will enable faster and more complex queries._ + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. The Harper Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete. + +## Delete an Instance + +Instance deletion has two different behaviors depending on the instance type.
+ +* **Harper Cloud Instance** This instance will be permanently deleted, including all data. This process is irreversible. +* **Enterprise Instance** The instance will be removed from the Harper Studio only. This does not uninstall Harper from your system, and your data will remain intact. + +An instance can be deleted as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the trash can icon. +1. Enter the instance name into the text box. + + _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ +1. Click the **Do It** button. + +## Upgrade an Instance + +Harper instances can be resized on the [Instance Configuration](./instance-configuration) page. + +## Instance Log In/Log Out + +The Studio enables you to log in to an instance as different database users from the instance control panel. To log out of an instance: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the lock icon. +1. You will immediately be logged out of the instance. + +To log in to an instance: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card (it will have an unlocked icon and a status reading PLEASE LOG IN) and click the center of the card. +1. Enter the database username. + + _The username of a Harper user that is already configured in your Harper instance._ +1. Enter the database password. + + _The password of a Harper user that is already configured in your Harper instance._ +1. Click **Log In**. diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/login-password-reset.md b/site/versioned_docs/version-4.5/administration/harper-studio/login-password-reset.md new file mode 100644 index 00000000..2d1e7eac --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/login-password-reset.md @@ -0,0 +1,42 @@ +--- +title: Login and Password Reset +--- + +# Login and Password Reset + +## Log In to Your Harper Studio Account + +To log into your existing Harper Studio account: + +1) Navigate to the [Harper Studio](https://studio.harperdb.io/). +2) Enter your email address. +3) Enter your password. +4) Click **sign in**. + +## Reset a Forgotten Password + +To reset a forgotten password: + +1) Navigate to the Harper Studio password reset page. +2) Enter your email address. +3) Click **send password reset email**. +4) If the account exists, you will receive an email with a temporary password. +5) Navigate back to the Harper Studio login page. +6) Enter your email address. +7) Enter your temporary password. +8) Click **sign in**. +9) You will be taken to a new screen to reset your account password. Enter your new password. +*Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character.* +10) Click the **add account password** button. + +## Change Your Password + +If you are already logged into the Studio, you can change your password through the user interface. + +1) Navigate to the Harper Studio profile page. +2) In the **password** section, enter: + + * Current password.
+ * New password. + * New password again *(for verification)*. +3) Click the **Update Password** button. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/manage-applications.md b/site/versioned_docs/version-4.5/administration/harper-studio/manage-applications.md new file mode 100644 index 00000000..16974445 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/manage-applications.md @@ -0,0 +1,59 @@ +--- +title: Manage Applications +--- + +# Manage Applications + +[Harper Applications](../../developers/applications/) are enabled by default and can be configured further through the Harper Studio. It is recommended to read through the [Applications](../../developers/applications/) documentation first to gain a strong understanding of Harper Applications behavior. + +All application configuration and development is handled through the **applications** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the Harper Studio Organizations page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **applications** in the instance control bar. + +_Note, the **applications** page will only be available to super users._ + +## Manage Applications + +The Applications editor is not required for development and deployment, though it is a useful tool to maintain and manage your Harper Applications. The editor provides the ability to create new applications or import/deploy remote application packages. + +The left bar is the applications file navigator, allowing you to select files to edit and add/remove files and folders. By default, this view is empty because there are no existing applications. To get started, either create a new application or import/deploy a remote application. + +The right side of the screen is the file editor. Here you can edit individual files of your application directly in the Harper Studio. + +## Things to Keep in Mind + +To learn more about developing Harper Applications, make sure to read through the [Applications](../../developers/applications/) documentation. + +When working with Applications in the Harper Studio, by default the editor will restart the Harper Applications server every time a file is saved. Note, this behavior can be turned off by toggling the `auto` toggle at the top right of the applications page. If you save frequently while editing, intermediate saves may contain errors that prevent the application from running. These errors will not be visible on the applications page; however, they will be available in the Harper logs, which can be found on the [status page](./instance-metrics). + +The Applications editor stores unsaved changes in cache. This means that occasionally your editor will show a discrepancy from the code that is stored and running on your Harper instance. You can tell that the code in your Studio differs when the "save" and "revert" buttons are active. To revert the cached version in your editor to the version of the file stored on your Harper instance, click the "revert" button. + +## Accessing Your Application Endpoints + +How you access your application endpoints varies with the type of endpoint you're creating. All endpoints, regardless of type, will be accessed via the [Harper HTTP port found in the Harper configuration file](../../deployments/configuration#http).
The default port is `9926`, but you can verify what your instance is set to by navigating to the [instance config page](./instance-configuration) and examining the read-only JSON version of your instance's config file, looking specifically for either the `http: port: 9926` or `http: securePort: 9926` configs. If `port` is set, you will access your endpoints via `http`; if `securePort` is set, you will access your endpoints via `https`. + +Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`. + +* **Standard REST Endpoints**\ + Standard REST endpoints are defined by applying the `@export` directive to tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest), we could send a `GET` (or, since this is a GET, we could simply enter the URL in our browser) to `https://localhost:9926/Dog/1`. +* **Augmented REST Endpoints**\ + Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`. +* **Fastify Routes**\ + If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost:9926/application-template/getAll`. + +## Creating a New Application + +1. From the applications page, click the "+ app" button at the top right. +1. Click "+ Create A New Application Using The Default Template". +1. Enter a name for your project; note, project names must contain only alphanumeric characters, dashes, and underscores. +1. Click OK. +1. Your project will be available in the applications file navigator on the left. Click a file to open it for editing. + +## Editing an Application + +1. From the applications page, click the file you would like to edit from the file navigator on the left. +1. Edit the file with any changes you'd like. +1. Click "save" at the top right. Note, as mentioned above, when you save a file, the Harper Applications server will be restarted immediately.
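For reference, route loading is controlled by the application's `config.yaml` mentioned above. A minimal sketch, modeled on the application template (exact keys may vary by version), looks like this:

```yaml
rest: true # expose schema-defined tables via the REST interface
graphqlSchema:
  files: '*.graphql' # schema files containing table and @export definitions
fastifyRoutes:
  files: routes/*.js # where Fastify route definitions are loaded from
  urlPath: . # base URL path the routes are served under
```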
diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/manage-databases-browse-data.md b/site/versioned_docs/version-4.5/administration/harper-studio/manage-databases-browse-data.md new file mode 100644 index 00000000..88c16a6c --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/manage-databases-browse-data.md @@ -0,0 +1,132 @@ +--- +title: Manage Databases / Browse Data +--- + +# Manage Databases / Browse Data + +Manage instance databases/tables and browse data in tabular format with the following instructions: + +1) Navigate to the Harper Studio Organizations page. +2) Click the appropriate organization that the instance belongs to. +3) Select your desired instance. +4) Click **browse** in the instance control bar. + +Once on the instance browse page you can view data, manage databases and tables, add new data, and more. + +## Manage Databases and Tables + +#### Create a Database + +1) Click the plus icon at the top right of the databases section. +2) Enter the database name. +3) Click the green check mark. + +#### Delete a Database + +Deleting a database is permanent and irreversible. Deleting a database removes all tables and data within it. + +1) Click the minus icon at the top right of the databases section. +2) Identify the appropriate database to delete and click the red minus sign in the same row. +3) Click the red check mark to confirm deletion. + +#### Create a Table + +1) Select the desired database from the databases section. +2) Click the plus icon at the top right of the tables section. +3) Enter the table name. +4) Enter the primary key. + + *The primary key is also often referred to as the hash attribute in the studio, and it defines the unique identifier for each row in your table.* +5) Click the green check mark. + +#### Delete a Table + +Deleting a table is permanent and irreversible. Deleting a table removes all data within it. + +1) Select the desired database from the databases section. +2) Click the minus icon at the top right of the tables section. +3) Identify the appropriate table to delete and click the red minus sign in the same row. +4) Click the red check mark to confirm deletion. + +## Manage Table Data + +The following section assumes you have selected the appropriate table from the database/table browser. + +#### Filter Table Data + +1) Click the magnifying glass icon at the top right of the table browser to expand the search filters. +2) Enter your search criteria. +3) The results will be filtered appropriately. + +#### Load CSV Data + +1) Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. +2) To import a CSV by URL: + 1) Enter the URL in the **CSV file URL** textbox. + 2) Click **Import From URL**. + 3) The CSV will load, and you will be redirected back to browse table data. +3) To upload a CSV file: + 1) Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). + 2) Navigate to your desired CSV file and select it. + 3) Click **Insert X Records**, where X is the number of records in your CSV. + 4) The CSV will load, and you will be redirected back to browse table data. + +#### Add a Record + +1) Click the plus icon at the top right of the table browser. +2) The Studio will pre-populate existing table attributes in JSON format. + + *The primary key is not included, but you can add it in and set it to your desired value.
Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction.* +3) Enter values to be added to the record. + + *You may add new attributes to the JSON; they will be automatically added to the table.* +4) Click the **Add New** button. + +#### Edit a Record + +1) Click the record/row you would like to edit. +2) Modify the desired values. + + *You may add new attributes to the JSON; they will be automatically added to the table.* + +3) Click the **save icon**. + +#### Delete a Record + +Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted. + +1) Click the record/row you would like to delete. +2) Click the **delete icon**. +3) Confirm deletion by clicking the **check icon**. + +## Browse Table Data + +The following section assumes you have selected the appropriate table from the database/table browser. + +#### Browse Table Data + +The first page of table data is automatically loaded on table selection. Paging controls are at the bottom of the table. Here you can: + +* Page left and right using the arrows. +* Type in the desired page. +* Change the page size (the number of records displayed in the table). + +#### Refresh Table Data + +Click the refresh icon at the top right of the table browser. + +#### Automatically Refresh Table Data + +Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. + diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/manage-instance-roles.md b/site/versioned_docs/version-4.5/administration/harper-studio/manage-instance-roles.md new file mode 100644 index 00000000..d0f8c82d --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/manage-instance-roles.md @@ -0,0 +1,76 @@ +--- +title: Manage Instance Roles +--- + +# Manage Instance Roles + +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. + +Instance role configuration is handled through the **roles** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the Harper Studio Organizations page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **roles** in the instance control bar. + +*Note, the **roles** page will only be available to super users.* + +The *roles management* screen consists of the following panels: + +* **super users** + + Displays all super user roles for this instance. +* **cluster users** + + Displays all cluster user roles for this instance. +* **standard roles** + + Displays all standard roles for this instance. +* **role permission editing** + + Once a role is selected for editing, permissions will be displayed here in JSON format. + +*Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`.* + +## Role Management + +#### Create a Role + +1) Click the plus icon at the top right of the appropriate role section. + +2) Enter the role name. + +3) Click the green check mark.
+ +4) Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. + +5) Configure the role permissions in the role permission editing panel. + + *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + +6) Click **Update Role Permissions**. + +#### Modify a Role + +1) Click the appropriate role from the appropriate role section. + +2) Modify the role permissions in the role permission editing panel. + + *Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel.* + +3) Click **Update Role Permissions**. + +#### Delete a Role + +Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it. + +1) Click the minus icon at the top right of the roles section. + +2) Identify the appropriate role to delete and click the red minus sign in the same row. + +3) Click the red check mark to confirm deletion. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/manage-instance-users.md b/site/versioned_docs/version-4.5/administration/harper-studio/manage-instance-users.md new file mode 100644 index 00000000..a99ae4c6 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/manage-instance-users.md @@ -0,0 +1,61 @@ +--- +title: Manage Instance Users +--- + +# Manage Instance Users + +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. + +Instance user configuration is handled through the **users** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **users** in the instance control bar. + +*Note, the **users** page will only be available to super users.* + +## Add a User + +Harper instance users can be added with the following instructions. + +1) In the **add user** panel on the left enter: + + * New user username. + + * New user password. + + * Select a role. + + *Learn more about role management here: [Manage Instance Roles](./manage-instance-roles).* + +2) Click **Add User**. + +## Edit a User + +Harper instance users can be modified with the following instructions. + +1) In the **existing users** panel, click the row of the user you would like to edit. + +2) To change a user’s password: + + 1) In the **Change user password** section, enter the new password. + + 2) Click **Update Password**. + +3) To change a user’s role: + + 1) In the **Change user role** section, select the new role. + + 2) Click **Update Role**. + +4) To delete a user: + + 1) In the **Delete User** section, type the username into the textbox. + + *This is done for confirmation purposes.* + + 2) Click **Delete User**.
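As noted on the Instances page, the Studio performs these actions from your browser using the standard operations API. For reference, adding a user corresponds to an `add_user` operation shaped roughly like the following sketch (the role name and credentials shown are placeholders):

```json
{
  "operation": "add_user",
  "role": "developer",
  "username": "jdoe",
  "password": "a-strong-password",
  "active": true
}
```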
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/manage-replication.md b/site/versioned_docs/version-4.5/administration/harper-studio/manage-replication.md new file mode 100644 index 00000000..78a457a9 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/manage-replication.md @@ -0,0 +1,89 @@ +--- +title: Manage Replication +--- + +# Manage Replication + +Harper instance clustering and replication can be configured directly through the Harper Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/) first to gain a strong understanding of Harper clustering behavior. + +All clustering configuration is handled through the **replication** page of the Harper Studio, accessed with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +2) Click the appropriate organization that the instance belongs to. + +3) Select your desired instance. + +4) Click **replication** in the instance control bar. + +Note, the **replication** page will only be available to super users. + +--- +## Initial Configuration + +Harper instances do not have clustering configured by default. The Harper Studio will walk you through the initial configuration. Upon entering the **replication** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user) document before proceeding. + +1) Enter the Cluster User username (defaults to `cluster_user`). +2) Enter the Cluster Password. +3) Review and/or set the Cluster Node Name. +4) Click **Enable Clustering**. + +At this point the Studio will restart your Harper Instance, which is required for the configuration changes to take effect. + +--- + +## Manage Clustering + +Once initial clustering configuration is complete, you are presented with a clustering management screen with the following panels: + +* **connected instances** + + Displays all instances within the Studio Organization that this instance manages a connection with. + +* **unconnected instances** + + Displays all instances within the Studio Organization that this instance does not manage a connection with. + +* **unregistered instances** + + Displays all instances outside the Studio Organization that this instance manages a connection with. + +* **manage clustering** + + Once instances are connected, this will display clustering management options for all connected instances and all databases and tables. +--- + +## Connect an Instance + +Harper Instances can be clustered together with the following instructions. + +1) Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both. + +2) Identify the instance you would like to connect from the **unconnected instances** panel. + +3) Click the plus icon next to the appropriate instance. + +4) If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue. + +--- + +## Disconnect an Instance + +Harper Instances can be disconnected with the following instructions.
+ +1) Identify the instance you would like to disconnect from the **connected instances** panel. + +2) Click the minus icon next to the appropriate instance. + +--- + +## Manage Replication + +Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions: + +1) Identify the instance, database, and table for replication to be configured. + +2) For publish, click the toggle switch in the **publish** column. + +3) For subscribe, click the toggle switch in the **subscribe** column. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/organizations.md b/site/versioned_docs/version-4.5/administration/harper-studio/organizations.md new file mode 100644 index 00000000..fede2cd8 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/organizations.md @@ -0,0 +1,105 @@ +--- +title: Organizations +--- + +# Organizations + +Harper Studio organizations provide the ability to group Harper Cloud Instances. Organization behavior is as follows: + +* Billing occurs at the organization level to a single credit card. +* Organizations retain their own unique Harper Cloud subdomain. +* Cloud instances reside within an organization. +* Studio users can be invited to organizations to share instances. + +An organization is automatically created for you when you sign up for Harper Studio. If you only have one organization, the Studio will automatically bring you to your organization’s page. + +--- + +## List Organizations + +A summary view of all organizations your user belongs to can be viewed on the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the Harper Studio. + +## Create a New Organization + +A new organization can be created as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the **Create a New Organization** card. +3) Fill out new organization details: + * Enter Organization Name + *This is used for descriptive purposes only.* + * Enter Organization Subdomain + *Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com.* +4) Click Create Organization. + +## Delete an Organization + +An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows: + +1) Navigate to the Harper Studio Organizations page. +2) Identify the proper organization card and click the trash can icon. +3) Enter the organization name into the text box. + + *This is done for confirmation purposes to ensure you do not accidentally delete an organization.* +4) Click the **Do It** button. + +## Manage Users + +Harper Studio organization owners can manage users, including inviting new users, removing users, and toggling ownership. + +#### Inviting a User + +A new user can be invited to an organization as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page.
+2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) In the **add user** box, enter the new user’s email address. +5) Click **Add User**. + +Users may or may not already be Harper Studio users when adding them to an organization. If the Harper Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a Harper Studio account, they will receive an email welcoming them to Harper Studio. + +--- + +#### Toggle a User’s Organization Owner Status + +Organization owners have full access to the organization, including the ability to manage organization users; create, modify, and delete instances; and delete the organization. Users must have accepted their invitation prior to being promoted to an owner. A user’s organization owner status can be toggled as follows: + +1) Navigate to the Harper Studio Organizations page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) Click the appropriate user from the **existing users** section. +5) Toggle the **Is Owner** switch to the desired status. +--- + +#### Remove a User from an Organization + +Users may be removed from an organization at any time. Removing a user from an organization will not delete their Harper Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **users** at the top of the screen. +4) Click the appropriate user from the **existing users** section. +5) Type **DELETE** in the text box in the **Delete User** row. + + *This is done for confirmation purposes to ensure you do not accidentally delete a user.* +6) Click **Delete User**. + +## Manage Billing + +Billing is configured per organization; charges are billed to the stored credit card at the appropriate interval (monthly or annually, depending on the registered instance). Billing settings can be configured as follows: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +2) Click the appropriate organization card. +3) Click **billing** at the top of the screen. + +Here organization owners can view invoices, manage coupons, and manage the associated credit card. + +*Harper billing and payments are managed via Stripe.* + +### Add a Coupon + +Coupons are applicable towards any paid tier or enterprise instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows: + +1) In the coupons panel of the **billing** page, enter your coupon code. +2) Click **Add Coupon**. +3) The coupon will then be available and displayed in the coupons panel. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/harper-studio/query-instance-data.md b/site/versioned_docs/version-4.5/administration/harper-studio/query-instance-data.md new file mode 100644 index 00000000..b0dc0261 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/harper-studio/query-instance-data.md @@ -0,0 +1,53 @@ +--- +title: Query Instance Data +--- + +# Query Instance Data + +SQL queries can be executed directly through the Harper Studio with the following instructions: + +1) Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page.
+2) Click the appropriate organization that the instance belongs to. +3) Select your desired instance. +4) Click **query** in the instance control bar. +5) Enter your SQL query in the SQL query window. +6) Click **Execute**. + +*Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser.* + +## Browse Query Results Set + +#### Browse Results Set Data + +The first page of results set data is automatically loaded on query execution. Paging controls are at the bottom of the table. Here you can: + +* Page left and right using the arrows. +* Type in the desired page. +* Change the page size (the number of records displayed in the table). + +#### Refresh Results Set + +Click the refresh icon at the top right of the results set table. + +#### Automatically Refresh Results Set + +Toggle the auto switch at the top right of the results set table. The results set will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. + +## Query History + +Query history is stored in your local browser cache. Executed queries are listed with the most recent at the top in the **query history** section. + +#### Rerun Previous Query + +* Identify the query from the **query history** list. +* Click the appropriate query. It will be loaded into the **sql query** input box. +* Click **Execute**. + +#### Clear Query History + +Click the trash can icon at the top right of the **query history** section. + +## Create Charts + +The Harper Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/administration/jobs.md b/site/versioned_docs/version-4.5/administration/jobs.md new file mode 100644 index 00000000..e71dd9cf --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/jobs.md @@ -0,0 +1,112 @@ +--- +title: Jobs +--- + +# Jobs + +Harper Jobs are asynchronous tasks performed by the Operations API. + +## Job Summary + +Jobs use an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so the job is started and its ID is provided to check on the status. + +The job status can be **COMPLETE** or **IN\_PROGRESS**. + +## Example Job Operations + +Example job operations include: + +[csv data load](../developers/operations-api/bulk-operations#csv-data-load) + +[csv file load](../developers/operations-api/bulk-operations#csv-file-load) + +[csv url load](../developers/operations-api/bulk-operations#csv-url-load) + +[import from s3](../developers/operations-api/bulk-operations#import-from-s3) + +[delete_records_before](../developers/operations-api/utilities#delete-records-before) + +[export_local](../developers/operations-api/utilities#export-local) + +[export_to_s3](../developers/operations-api/utilities#export-to-s3) + +Example Response from a Job Operation + +``` +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` + +Whenever one of these operations is initiated, an asynchronous job is created and the response contains the ID of that job, which can be used to check on its status. + +## Managing Jobs + +To check on a job's status, use the [get_job](../developers/operations-api/jobs#get-job) operation.
+ +Get Job Request + +``` +{ + "operation": "get_job", + "id": "4a982782-929a-4507-8794-26dae1132def" +} +``` + +Get Job Response + +``` +[ + { + "__createdtime__": 1611615798782, + "__updatedtime__": 1611615801207, + "created_datetime": 1611615798774, + "end_datetime": 1611615801206, + "id": "4a982782-929a-4507-8794-26dae1132def", + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "start_datetime": 1611615798805, + "status": "COMPLETE", + "type": "csv_url_load", + "user": "HDB_ADMIN", + "start_datetime_converted": "2021-01-25T23:03:18.805Z", + "end_datetime_converted": "2021-01-25T23:03:21.206Z" + } +] +``` + +## Finding Jobs + +To find jobs (if the ID is not known), use the [search_jobs_by_start_date](../developers/operations-api/jobs#search-jobs-by-start-date) operation. + +Search Jobs Request + +``` +{ + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" +} +``` + +Search Jobs Response + +``` +[ + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } +] +``` diff --git a/site/versioned_docs/version-4.5/administration/logging/audit-logging.md b/site/versioned_docs/version-4.5/administration/logging/audit-logging.md new file mode 100644 index 00000000..cfec1062 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/logging/audit-logging.md @@ -0,0 +1,135 @@ +--- +title: Audit Logging +--- + +# Audit Logging + +### Audit log + +The audit log uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. + +The audit log is enabled by default. To disable it, set `logging.auditLog` to false in the config file, `harperdb-config.yaml`. Then restart Harper for the changes to take effect. Note, the audit log must be enabled for real-time messaging. + +### Audit Log Operations + +#### read\_audit\_log + +The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [Harper API documentation](../../developers/operations-api/logs). + +**Search by Timestamp** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558 + ] +} +``` + +There are three possible outcomes when searching by timestamp. + +* `"search_values": []` - All records returned for the specified table +* `"search_values": [1660585740558]` - All records after the provided timestamp +* `"search_values": [1660585740558, 1760585759710]` - Records "from" and "to" the provided timestamps + +*** + +**Search by Username** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +The above example will return all records whose `username` is "admin."
+ +*** + +**Search by Primary Key** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +The above example will return all records whose primary key (`hash_value`) is 318. + +*** + +#### read\_audit\_log Response + +The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation gives you the `original_records`. + +```json +{ + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [ + 1, + 2 + ], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] +} +``` + +#### delete\_audit\_logs\_before + +Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 +} +``` diff --git a/site/versioned_docs/version-4.5/administration/logging/index.md b/site/versioned_docs/version-4.5/administration/logging/index.md new file mode 100644 index 00000000..7a9588ce --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/logging/index.md @@ -0,0 +1,11 @@ +--- +title: Logging +--- + +# Logging + +Harper provides many different logging options for various features and functionality. + +* [Standard Logging](./standard-logging): Harper maintains a log of events that take place throughout operation. +* [Audit Logging](./audit-logging): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +* [Transaction Logging](./transaction-logging): Harper stores a verbose history of all transactions logged for specified database tables, including original data records. diff --git a/site/versioned_docs/version-4.5/administration/logging/standard-logging.md b/site/versioned_docs/version-4.5/administration/logging/standard-logging.md new file mode 100644 index 00000000..7194fff4 --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/logging/standard-logging.md @@ -0,0 +1,65 @@ +--- +title: Standard Logging +--- + +# Standard Logging + +Harper maintains a log of events that take place throughout operation. Log messages can be used for diagnostic purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory `/log/hdb.log`. The install log is located in the Harper application directory, most likely in your npm directory: `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: + +``` +<timestamp> [<level>] [<thread>/<id>] ...[<tag>]: <message> +``` + +For example, a typical log entry looks like: + +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started.
+``` + +The components of a log entry are: + +* timestamp - This is the date/time stamp when the event occurred +* level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels, in order from least urgent (and most verbose), are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +* thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are: + * main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads + * http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + * Clustering\* - These are threads and processes that handle replication. + * job - These are job threads that have been started to handle operations that are executed in a separate job thread. +* tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +* message - This is the main message that was reported. + +We try to keep logging to a minimum by default; to do this, the default log level is `error`. If you require more information from the logs, setting a more verbose log level (such as `info` or `debug`) will provide that. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +Harper clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of Harper, and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels from least verbose are `error`, `warn`, `info`, `debug` and `trace`. + +## Log File vs Standard Streams + +Harper logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file. Logging to both standard streams and to a file can be enabled simultaneously. To log to standard streams effectively, make sure to directly run `harperdb` and don't start it as a separate process (don't use `harperdb start`), and `logging.stdStreams` must be set to true. Note, logging to standard streams only will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This allows for organized storage and efficient use of disk space. For more information see “logging” in our [config docs](../../deployments/configuration). + +## Read Logs via the API + +To access specific logs, you may query the Harper API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria.
+ +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` diff --git a/site/versioned_docs/version-4.5/administration/logging/transaction-logging.md b/site/versioned_docs/version-4.5/administration/logging/transaction-logging.md new file mode 100644 index 00000000..48860fdd --- /dev/null +++ b/site/versioned_docs/version-4.5/administration/logging/transaction-logging.md @@ -0,0 +1,87 @@ +--- +title: Transaction Logging +--- + +# Transaction Logging + +Harper offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. + +## Transaction log + +The first option is `read_transaction_log`. The transaction log is built upon clustering streams. Clustering streams are per-table message stores that enable data to be propagated across a cluster. Harper leverages streams for use with the transaction log. When clustering is enabled all transactions that occur against a table are pushed to its stream, and thus make up the transaction log. + +If you would like to use the transaction log, but have not set up clustering yet, please see ["How to Cluster"](../../developers/clustering/). + +## Transaction Log Operations + +### read\_transaction\_log + +The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided. + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 +} +``` + +_See example response below._ + +### read\_transaction\_log Response + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +_See example request above._ + +### delete\_transaction\_logs\_before + +The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +_Note: Streams are used for catchup if a node goes down. If you delete messages from a stream, there is a chance catchup won't work._ + +Read on for `read_audit_log`, the second option for logging transactions executed against a table.
diff --git a/site/versioned_docs/version-4.5/deployments/_category_.json b/site/versioned_docs/version-4.5/deployments/_category_.json new file mode 100644 index 00000000..8fdd6e17 --- /dev/null +++ b/site/versioned_docs/version-4.5/deployments/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Deployments", + "position": 3, + "link": { + "type": "generated-index", + "title": "Deployments Documentation", + "description": "Installation and deployment guides for HarperDB", + "keywords": [ + "deployments" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/deployments/configuration.md b/site/versioned_docs/version-4.5/deployments/configuration.md new file mode 100644 index 00000000..fab9369b --- /dev/null +++ b/site/versioned_docs/version-4.5/deployments/configuration.md @@ -0,0 +1,1127 @@ +--- +title: Configuration File +--- + +# Configuration File + +Harper is configured through a [YAML](https://yaml.org/) file called `harperdb-config.yaml` located in the Harper root directory (by default this is a directory named `hdb` located in the home directory of the current user). + +Some configuration will be populated by default in the config file on install, regardless of whether it is used. + +*** + +## Using the Configuration File and Naming Conventions + +The configuration elements in `harperdb-config.yaml` use camelcase, such as `operationsApi`. + +To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. **Harper must be restarted for changes to take effect.** + +Alternatively, all configuration values can also be modified using environment variables, command line arguments, or the operations API via the [`set_configuration` operation](../developers/operations-api/utilities#set-configuration). + +For nested configuration elements, use underscores to represent parent-child relationships. When accessed this way, elements are case-insensitive. + +For example, to disable logging rotation in the `logging` section: + +```yaml +logging: + rotation: + enabled: false +``` + +You could apply this change using: +* Environment variable: `LOGGING_ROTATION_ENABLED=false` +* Command line variable: `--LOGGING_ROTATION_ENABLED false` +* Operations API (`set_configuration`): `logging_rotation_enabled: false` + +To change the `port` in the `http` section, use: +* Environment variable: `HTTP_PORT=<value>` +* Command line variable: `--HTTP_PORT <value>` +* Operations API (`set_configuration`): `http_port: <value>` + +To set the `operationsApi.network.port` to `9925`, use: +* Environment variable: `OPERATIONSAPI_NETWORK_PORT=9925` +* Command line variable: `--OPERATIONSAPI_NETWORK_PORT 9925` +* Operations API (`set_configuration`): `operationsApi_network_port: 9925` + +_Note: Component configuration cannot be added or updated via CLI or ENV variables._ + +## Importing installation configuration + +To use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your custom configuration file. + +To install Harper on top of an existing configuration file, set `HDB_CONFIG` to the root path of your install: `<ROOTPATH>/harperdb-config.yaml` + +*** + +## Configuration Options + +### `http` + +`sessionAffinity` - _Type_: string; _Default_: null + +Harper is a multi-threaded server designed to scale to utilize many CPU cores with high concurrency. Session affinity can help improve the efficiency and fairness of thread utilization by routing multiple requests from the same client to the same thread.
This provides a fairer method of request handling by keeping a single user contained to a single thread, can improve caching locality (multiple requests from a single user are more likely to access the same data), and can provide the ability to share information in-memory in user sessions. Enabling session affinity will cause subsequent requests from the same client to be routed to the same thread. + +To enable `sessionAffinity`, you need to specify how clients will be identified from the incoming requests. If you are using Harper to directly serve HTTP requests from users from different remote addresses, you can use a setting of `ip`. However, if you are using Harper behind a proxy server or application server, all the remote ip addresses will be the same and Harper will effectively only run on a single thread. Alternately, you can specify a header to use for identification. If you are using basic authentication, you could use the "Authorization" header to route requests to threads by the user's credentials. If you have another header that uniquely identifies users/clients, you can use that as the value of sessionAffinity. But be careful to ensure that the value does provide sufficient uniqueness and that requests are effectively distributed to all the threads and fully utilize all your CPU cores. + +```yaml +http: + sessionAffinity: ip +``` + +`compressionThreshold` - _Type_: number; _Default_: 1200 (bytes) + +For HTTP clients that support (Brotli) compression encoding, responses that are larger than this threshold will be compressed (also note that for clients that accept compression, any streaming responses from queries are compressed as well, since the size is not known beforehand). + +```yaml +http: + compressionThreshold: 1200 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across domains. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of allowable domains for CORS. + +`corsAccessControlAllowHeaders` - _Type_: string; _Default_: 'Accept, Content-Type, Authorization' + +A string representation of a comma separated list of header keys for the [Access-Control-Allow-Headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers) header for OPTIONS requests. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limits the amount of time the parser will wait to receive the complete HTTP headers. + +`maxHeaderSize` - _Type_: integer; _Default_: 16394 + +The maximum allowed size of HTTP headers in bytes. + +`keepAliveTimeout` - _Type_: integer; _Default_: 30,000 milliseconds (30 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9926 + +The port used to access the component server. + +`securePort` - _Type_: integer; _Default_: null + +The port the Harper component server uses for HTTPS connections. This requires a valid certificate and key. + +`http2` - _Type_: boolean; _Default_: false + +Enables HTTP/2 for the HTTP server. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout.
+ +```yaml +http: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + maxHeaderSize: 8192 + http2: false + keepAliveTimeout: 30000 + port: 9926 + securePort: null + timeout: 120000 +``` + +`mtls` - _Type_: boolean | object; _Default_: false + +This can be configured to enable mTLS-based authentication for incoming connections. If enabled with default options (by setting to `true`), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`, and if the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. + +You can also define specific mTLS options by specifying an object for mtls with the following (optional) properties which may be included: + +`user` - _Type_: string; _Default_: Common Name + +This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication. + +`required` - _Type_: boolean; _Default_: false + +This can be enabled to require client certificates (mTLS) for all incoming connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication. + +```yaml +http: + mtls: true +``` + +or + +```yaml +http: + mtls: + required: true + user: user-name +``` + +*** + +### `threads` + +The `threads` provides control over how many threads, how much heap memory they may use, and debugging of the threads: + +`count` - _Type_: number; _Default_: One less than the number of logical cores/processors + +The `threads.count` option specifies the number of threads that will be used to service the HTTP requests for the operations API and custom functions. Generally, this should be close to the number of CPU logical cores/processors to ensure the CPU is fully utilized (a little less because Harper does have other threads at work), assuming Harper is the main service on a server. + +```yaml +threads: + count: 11 +``` + +`debug` - _Type_: boolean | object; _Default_: false + +This enables debugging. If simply set to true, this will enable debugging on the main thread on port 9229 with the 127.0.0.1 host interface. This can also be an object for more debugging control. + +* `debug.port` - The port to use for debugging the main thread. +* `debug.startingPort` - This will set up a separate port for debugging each thread. This is necessary for debugging individual threads with devtools. +* `debug.host` - Specify the host interface to listen on. +* `debug.waitForDebugger` - Wait for the debugger before starting. + +```yaml +threads: + debug: + port: 9249 +``` + +`maxHeapMemory` - _Type_: number; + +```yaml +threads: + maxHeapMemory: 300 +``` + +This specifies the heap memory limit for each thread, in megabytes. The default heap limit is a heuristic based on available memory and thread count.
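Putting the options above together, a `threads` section that pins the thread count, caps per-thread heap memory, and enables per-thread debugging might look like the following sketch (the values are illustrative, not recommendations):

```yaml
threads:
  count: 11 # roughly one less than the number of logical cores
  maxHeapMemory: 300 # per-thread heap limit, in megabytes
  debug:
    startingPort: 9230 # give each thread its own debug port, starting here
    host: 127.0.0.1 # interface the debugger listens on
```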
+
+***
+
+### `replication`
+
+The `replication` section configures [Harper replication](../developers/replication/), which is used to create Harper clusters and replicate data between the instances.
+
+```yaml
+replication:
+  hostname: server-one
+  url: wss://server-one:9925
+  databases: "*"
+  routes:
+    - wss://server-two:9925
+  port: null
+  securePort: 9933
+  enableRootCAs: true
+```
+
+`hostname` - _Type_: string;
+
+The hostname of the current Harper instance.
+
+`url` - _Type_: string;
+
+The URL of the current Harper instance.
+
+`databases` - _Type_: string/array; _Default_: "\*" (all databases)
+
+Configure which databases to replicate. This can be a string for all databases or an array for specific databases.
+
+```yaml
+replication:
+  databases:
+    - db1
+    - db2
+```
+
+`routes` - _Type_: array;
+
+An array of routes to connect to other nodes. Each element in the array can be either a string or an object with `hostname`, `port` and optionally `startTime` properties.
+
+`startTime` - _Type_: string; ISO formatted UTC date string.
+
+Replication will attempt to catch up on all remote data upon setup. To start replication from a specific date, set this property.
+
+`revokedCertificates` - _Type_: array;
+
+An array of serial numbers of revoked certificates. If a connection is attempted with a certificate that is in this list, the connection will be rejected.
+
+```yaml
+replication:
+  copyTablesToCatchUp: true
+  hostname: server-one
+  routes:
+    - wss://server-two:9925 # URL based route
+    - hostname: server-three # define a hostname and port
+      port: 9930
+      startTime: 2024-02-06T15:30:00Z
+  revokedCertificates:
+    - 1769F7D6A
+    - QA69C7E2S
+```
+
+`port` - _Type_: integer;
+
+The port to use for replication connections.
+
+`securePort` - _Type_: integer; _Default_: 9933
+
+The port to use for secure replication connections.
+
+`enableRootCAs` - _Type_: boolean; _Default_: true
+
+When true, Harper will verify certificates against the Node.js bundled CA store. The bundled CA store is a snapshot of the Mozilla CA store that is fixed at release time.
+
+`copyTablesToCatchUp` - _Type_: boolean; _Default_: false
+
+Replication will first attempt to catch up using the audit log. If unsuccessful, it will perform a full table copy. When set to `false`, replication will only use the audit log.
+
+`shard` - _Type_: integer;
+
+This defines the shard id of this instance and is used in conjunction with the [Table Resource functions](../developers/replication/sharding#custom-sharding) `setResidency` & `setResidencyById` to programmatically route traffic to the proper shard.
+
+***
+
+### `clustering` using NATS
+
+The `clustering` section configures the NATS clustering engine, which is used to replicate data between instances of Harper.
+
+_Note: There exist two ways to create clusters and replicate data in Harper. One option is to use native Harper replication over WebSockets. The other option is to use_ [_NATS_](https://nats.io/about/) _to facilitate the cluster._
+
+Clustering offers a lot of different configurations; however, in a majority of cases the only options you will need to pay attention to are:
+
+* `clustering.enabled` Enable the clustering processes.
+* `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes.
+* `clustering.hubServer.cluster.network.routes` The connections to other instances.
+* `clustering.nodeName` The name of your node, must be unique within the cluster.
+* `clustering.user` The name of the user credentials used for inter-node authentication.
+
+`enabled` - _Type_: boolean; _Default_: false
+
+Enable clustering.
+
+_Note: If you enable clustering but do not create and add a cluster user, you will get a validation error. See the `user` description below on how to add a cluster user._
+
+```yaml
+clustering:
+  enabled: true
+```
+
+`clustering.hubServer.cluster`
+
+Clustering’s `hubServer` facilitates the Harper mesh network and discovery service.
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      name: harperdb
+      network:
+        port: 9932
+        routes:
+          - host: 3.62.184.22
+            port: 9932
+          - host: 3.73.184.8
+            port: 9932
+```
+
+`name` - _Type_: string; _Default_: harperdb
+
+The name of your cluster. This name needs to be consistent for all other nodes intended to be meshed in the same network.
+
+`port` - _Type_: integer; _Default_: 9932
+
+The port the hub server uses to accept cluster connections.
+
+`routes` - _Type_: array; _Default_: null
+
+An object array that represents the host and port this server will cluster to. Each object must have two properties, `port` and `host`. Multiple entries can be added to create network resiliency in the event one server is unavailable. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints.
+
+`host` - _Type_: string
+
+The host of the remote instance you are creating the connection with.
+
+`port` - _Type_: integer
+
+The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance.
+
+`clustering.hubServer.leafNodes`
+
+```yaml
+clustering:
+  hubServer:
+    leafNodes:
+      network:
+        port: 9931
+```
+
+`port` - _Type_: integer; _Default_: 9931
+
+The port the hub server uses to accept leaf server connections.
+
+`clustering.hubServer.network`
+
+```yaml
+clustering:
+  hubServer:
+    network:
+      port: 9930
+```
+
+`port` - _Type_: integer; _Default_: 9930
+
+Use this port to connect a client to the hub server, for example using the NATS SDK to interact with the server.
+
+`clustering.leafServer`
+
+Manages streams; streams are ‘message stores’ that store table transactions.
+
+```yaml
+clustering:
+  leafServer:
+    network:
+      port: 9940
+      routes:
+        - host: 3.62.184.22
+          port: 9931
+        - host: node3.example.com
+          port: 9931
+    streams:
+      maxAge: 3600
+      maxBytes: 10000000
+      maxMsgs: 500
+      path: /user/hdb/clustering/leaf
+```
+
+`port` - _Type_: integer; _Default_: 9940
+
+Use this port to connect a client to the leaf server, for example using the NATS SDK to interact with the server.
+
+`routes` - _Type_: array; _Default_: null
+
+An object array that represents the host and port the leaf node will directly connect with. Each object must have two properties, `port` and `host`. Unlike the hub server, the leaf server will establish connections to all listed hosts. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints.
+
+`host` - _Type_: string
+
+The host of the remote instance you are creating the connection with.
+
+`port` - _Type_: integer
+
+The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.leafNodes.network.port` on the remote instance.
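+
+As a sketch of the API-based route management mentioned above, a `cluster_set_routes` call might look like the following (the host and port here are illustrative; `server` selects whether the route applies to the hub or leaf configuration):
+
+```json
+{
+  "operation": "cluster_set_routes",
+  "server": "hub",
+  "routes": [{ "host": "3.62.184.22", "port": 9932 }]
+}
+```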
+
+`clustering.leafServer.streams`
+
+`maxAge` - _Type_: integer; _Default_: null
+
+The maximum age of any messages in the stream, expressed in seconds.
+
+`maxBytes` - _Type_: integer; _Default_: null
+
+The maximum size of the stream in bytes. Oldest messages are removed if the stream exceeds this size.
+
+`maxMsgs` - _Type_: integer; _Default_: null
+
+How many messages may be in a stream. Oldest messages are removed if the stream exceeds this number.
+
+`path` - _Type_: string; _Default_: `<rootPath>/clustering/leaf`
+
+The directory where all the streams are kept.
+
+```yaml
+clustering:
+  leafServer:
+    streams:
+      maxConsumeMsgs: 100
+      maxIngestThreads: 2
+```
+
+`maxConsumeMsgs` - _Type_: integer; _Default_: 100
+
+The maximum number of messages a consumer can process in one go.
+
+`maxIngestThreads` - _Type_: integer; _Default_: 2
+
+The number of Harper threads that are delegated to ingesting messages.
+
+***
+
+`logLevel` - _Type_: string; _Default_: error
+
+Control the verbosity of clustering logs.
+
+```yaml
+clustering:
+  logLevel: error
+```
+
+The log level hierarchy, in order, is `trace`, `debug`, `info`, `warn`, and `error`. When the level is set to `trace`, logs will be created for all possible levels. Whereas if the level is set to `warn`, the only entries logged will be `warn` and `error`. The default value is `error`.
+
+`nodeName` - _Type_: string; _Default_: null
+
+The name of this node in your Harper cluster topology. This must be a value unique from the rest of the cluster node names.
+
+_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._
+
+```yaml
+clustering:
+  nodeName: great_node
+```
+
+`tls`
+
+Transport Layer Security default values are automatically generated on install.
+
+```yaml
+clustering:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+    insecure: true
+    verify: true
+```
+
+`certificate` - _Type_: string; _Default_: `<rootPath>/keys/certificate.pem`
+
+Path to the certificate file.
+
+`certificateAuthority` - _Type_: string; _Default_: `<rootPath>/keys/ca.pem`
+
+Path to the certificate authority file.
+
+`privateKey` - _Type_: string; _Default_: `<rootPath>/keys/privateKey.pem`
+
+Path to the private key file.
+
+`insecure` - _Type_: boolean; _Default_: true
+
+When true, will skip certificate verification. For use only with self-signed certs.
+
+`republishMessages` - _Type_: boolean; _Default_: false
+
+When true, all transactions that are received from other nodes are republished to this node's stream. When subscriptions are not fully connected between all nodes, this ensures that messages are routed to all nodes through intermediate nodes. This also ensures that all writes, whether local or remote, are written to the NATS transaction log. However, there is additional overhead with republishing, and setting this to false can provide better data replication performance. When false, you need to ensure all subscriptions are fully connected between every node and every other node, and be aware that the NATS transaction log will only consist of local writes.
+
+`verify` - _Type_: boolean; _Default_: true
+
+When true, the hub server will verify the client certificate using the CA certificate.
+
+***
+
+`user` - _Type_: string; _Default_: null
+
+The username given to the `cluster_user`. All instances in a cluster must use the same clustering user credentials (matching username and password).
+
+Inter-node authentication takes place via a special Harper user role type called `cluster_user`.
+
+The user can be created either through the API using an `add_user` request with the role set to `cluster_user`, or on install using the environment variables `CLUSTERING_USER=cluster_person CLUSTERING_PASSWORD=pass123!` or the CLI variables `harperdb --CLUSTERING_USER cluster_person --CLUSTERING_PASSWORD pass123!`
+
+```yaml
+clustering:
+  user: cluster_person
+```
+
+***
+
+### `localStudio`
+
+The `localStudio` section configures the local Harper Studio, a GUI for Harper hosted on the server. A hosted version of the Harper Studio with licensing and provisioning options is available at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or Harper Studio is made directly from your browser to the instance.
+
+`enabled` - _Type_: boolean; _Default_: false
+
+Enables the local Studio.
+
+```yaml
+localStudio:
+  enabled: false
+```
+
+***
+
+### `logging`
+
+The `logging` section configures Harper logging across all Harper functionality. This includes standard text logging of application and database events as well as structured data logs of record changes. Application and database events are logged in text format to the `~/hdb/log/hdb.log` file (or the location specified by `logging.root`).
+
+In addition, structured logging of data changes is also available:
+
+`auditLog` - _Type_: boolean; _Default_: false
+
+Enables table transaction logging.
+
+```yaml
+logging:
+  auditLog: false
+```
+
+To access the audit logs, use the API operation `read_audit_log`. It will provide a history of the data, including original records and changes made, in a specified table.
+
+```json
+{
+  "operation": "read_audit_log",
+  "schema": "dev",
+  "table": "dog"
+}
+```
+
+`file` - _Type_: boolean; _Default_: true
+
+Defines whether to log to a file.
+
+```yaml
+logging:
+  file: true
+```
+
+`auditRetention` - _Type_: string|number; _Default_: 3d
+
+This specifies how long audit logs should be retained.
+
+`level` - _Type_: string; _Default_: warn
+
+Control the verbosity of text event logs.
+
+```yaml
+logging:
+  level: warn
+```
+
+The log level hierarchy, in order, is `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. When the level is set to `trace`, logs will be created for all possible levels. Whereas if the level is set to `fatal`, the only entries logged will be `fatal` and `notify`. The default value is `warn`.
+
+`console` - _Type_: boolean; _Default_: true
+
+Controls whether `console.log` and other `console.*` calls (as well as any other JS component that writes to `process.stdout` and `process.stderr`) are logged to the log file. By default, these are logged to the log file, but this can be disabled.
+
+```yaml
+logging:
+  console: true
+```
+
+`root` - _Type_: string; _Default_: `<rootPath>/log`
+
+The path where the log files will be written.
+
+```yaml
+logging:
+  root: ~/hdb/log
+```
+
+`rotation`
+
+Rotation provides the ability to systematically rotate and archive the `hdb.log` file. To enable rotation, `interval` and/or `maxSize` must be set.
+
+_**Note:**_ `interval` and `maxSize` are approximates only. It is possible that the log file will exceed these values slightly before it is rotated.
+
+```yaml
+logging:
+  rotation:
+    enabled: true
+    compress: false
+    interval: 1D
+    maxSize: 100K
+    path: /user/hdb/log
+```
+
+`enabled` - _Type_: boolean; _Default_: true
+
+Enables log rotation.
+
+`compress` - _Type_: boolean; _Default_: false
+
+Enables compression via gzip when logs are rotated.
+
+`interval` - _Type_: string; _Default_: null
+
+The time that should elapse between rotations. Acceptable units are D(ays), H(ours) or M(inutes).
+
+`maxSize` - _Type_: string; _Default_: null
+
+The maximum size the log file can reach before it is rotated. Must use units M(egabyte), G(igabyte), or K(ilobyte).
+
+`path` - _Type_: string; _Default_: `<rootPath>/log`
+
+Where to store the rotated log file. The file naming convention is `HDB-YYYY-MM-DDT-HH-MM-SSSZ.log`.
+
+`stdStreams` - _Type_: boolean; _Default_: false
+
+Log Harper logs to the standard output and error streams.
+
+```yaml
+logging:
+  stdStreams: false
+```
+
+`auditAuthEvents`
+
+`logFailed` - _Type_: boolean; _Default_: false
+
+Log all failed authentication events.
+
+_Example:_ `[error] [auth-event]: {"username":"admin","status":"failure","type":"authentication","originating_ip":"127.0.0.1","request_method":"POST","path":"/","auth_strategy":"Basic"}`
+
+`logSuccessful` - _Type_: boolean; _Default_: false
+
+Log all successful authentication events.
+
+_Example:_ `[notify] [auth-event]: {"username":"admin","status":"success","type":"authentication","originating_ip":"127.0.0.1","request_method":"POST","path":"/","auth_strategy":"Basic"}`
+
+```yaml
+logging:
+  auditAuthEvents:
+    logFailed: false
+    logSuccessful: false
+```
+
+***
+
+### `authentication`
+
+The authentication section defines the configuration for the default authentication mechanism in Harper.
+
+```yaml
+authentication:
+  authorizeLocal: true
+  cacheTTL: 30000
+  enableSessions: true
+  operationTokenTimeout: 1d
+  refreshTokenTimeout: 30d
+```
+
+`authorizeLocal` - _Type_: boolean; _Default_: true
+
+This will automatically authorize any requests from the loopback IP address as the superuser. This should be disabled for any Harper servers that may be accessed by untrusted users on the same instance. For example, this should be disabled if you are using a local proxy, or for general server hardening.
+
+`cacheTTL` - _Type_: number; _Default_: 30000
+
+This defines the length of time (in milliseconds) that an authentication (a particular Authorization header or token) can be cached.
+
+`enableSessions` - _Type_: boolean; _Default_: true
+
+This will enable cookie-based sessions to maintain an authenticated session. This is generally the preferred mechanism for maintaining authentication in web browsers, as it allows cookies to hold an authentication token securely without giving JavaScript code access to tokens/credentials that may open up XSS vulnerabilities.
+
+`operationTokenTimeout` - _Type_: string; _Default_: 1d
+
+Defines the length of time an operation token will be valid until it expires. Example values: https://github.com/vercel/ms.
+
+`refreshTokenTimeout` - _Type_: string; _Default_: 30d
+
+Defines the length of time a refresh token will be valid until it expires. Example values: https://github.com/vercel/ms.
+
+### `operationsApi`
+
+The `operationsApi` section configures the Harper Operations API.\
+All the `operationsApi` configuration is optional. Any configuration that is not provided under this section will default to the `http` configuration section.
+
+`network`
+
+```yaml
+operationsApi:
+  network:
+    cors: true
+    corsAccessList:
+      - null
+    domainSocket: /user/hdb/operations-server
+    headersTimeout: 60000
+    keepAliveTimeout: 5000
+    port: 9925
+    securePort: null
+    timeout: 120000
+```
+
+`cors` - _Type_: boolean; _Default_: true
+
+Enable Cross Origin Resource Sharing, which allows requests across a domain.
+
+`corsAccessList` - _Type_: array; _Default_: null
+
+An array of allowable domains with CORS.
+
+`domainSocket` - _Type_: string; _Default_: `<rootPath>/operations-server`
+
+The path to the Unix domain socket used to provide the Operations API through the CLI.
+
+`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute)
+
+Limits the amount of time the parser will wait to receive the complete HTTP headers.
+
+`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds)
+
+Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response.
+
+`port` - _Type_: integer; _Default_: 9925
+
+The port the Harper operations API interface will listen on.
+
+`securePort` - _Type_: integer; _Default_: null
+
+The port the Harper operations API uses for HTTPS connections. This requires a valid certificate and key.
+
+`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes)
+
+The length of time in milliseconds after which a request will timeout.
+
+`tls`
+
+This configures the Transport Layer Security for HTTPS support.
+
+```yaml
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+```
+
+`certificate` - _Type_: string; _Default_: `<rootPath>/keys/certificate.pem`
+
+Path to the certificate file.
+
+`certificateAuthority` - _Type_: string; _Default_: `<rootPath>/keys/ca.pem`
+
+Path to the certificate authority file.
+
+`privateKey` - _Type_: string; _Default_: `<rootPath>/keys/privateKey.pem`
+
+Path to the private key file.
+
+***
+
+### `componentsRoot`
+
+`componentsRoot` - _Type_: string; _Default_: `<rootPath>/components`
+
+The path to the folder containing the local component files.
+
+```yaml
+componentsRoot: ~/hdb/components
+```
+
+***
+
+### `rootPath`
+
+`rootPath` - _Type_: string; _Default_: home directory of the current user
+
+The Harper database and applications/API/interface are decoupled from each other. The `rootPath` directory specifies where the Harper application persists data, config, logs, and Custom Functions.
+
+```yaml
+rootPath: /Users/jonsnow/hdb
+```
+
+***
+
+### `storage`
+
+`writeAsync` - _Type_: boolean; _Default_: false
+
+The `writeAsync` option turns off disk flushing/syncing, allowing for faster write operation throughput. However, this does not provide storage integrity guarantees, and if a server crashes, it is possible that there may be data loss requiring a restore from another backup/another node.
+
+```yaml
+storage:
+  writeAsync: false
+```
+
+`caching` - _Type_: boolean; _Default_: true
+
+The `caching` option enables in-memory caching of records, providing faster access to frequently accessed objects. This can incur some extra overhead for situations where reads are extremely random and don't benefit from caching.
+
+```yaml
+storage:
+  caching: true
+```
+
+`compression` - _Type_: boolean; _Default_: true
+
+The `compression` option enables compression of records in the database.
+This can be helpful for very large records in reducing storage requirements and potentially allowing more data to be cached. This uses the very fast LZ4 compression algorithm, but it still incurs extra costs for compressing and decompressing.
+
+```yaml
+storage:
+  compression: false
+```
+
+`compression.dictionary` - _Type_: string; _Default_: null
+
+Path to a compression dictionary file.
+
+`compression.threshold` - _Type_: number; _Default_: Either `4036` or, if `storage.pageSize` is provided, `storage.pageSize - 60`
+
+Only entries that are larger than this value (in bytes) will be compressed.
+
+```yaml
+storage:
+  compression:
+    dictionary: /users/harperdb/dict.txt
+    threshold: 1000
+```
+
+`compactOnStart` - _Type_: boolean; _Default_: false
+
+When `true`, all non-system databases will be compacted when starting Harper; read more [here](../administration/compact).
+
+`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false
+
+Keep the backups made by `compactOnStart`.
+
+```yaml
+storage:
+  compactOnStart: true
+  compactOnStartKeepBackup: false
+```
+
+`maxTransactionQueueTime` - _Type_: time; _Default_: 45s
+
+The `maxTransactionQueueTime` option specifies how long the write queue can get before write requests are rejected (with a 503).
+
+```yaml
+storage:
+  maxTransactionQueueTime: 2m
+```
+
+`noReadAhead` - _Type_: boolean; _Default_: false
+
+The `noReadAhead` option advises the operating system to not read ahead when reading from the database. This provides better memory utilization for databases with small records (less than one page), but can degrade performance in situations where large records are used or frequent range queries are used.
+
+```yaml
+storage:
+  noReadAhead: true
+```
+
+`prefetchWrites` - _Type_: boolean; _Default_: true
+
+The `prefetchWrites` option loads data prior to write transactions. This should be enabled for databases that are larger than memory (although it can be faster to disable this for smaller databases).
+
+```yaml
+storage:
+  prefetchWrites: true
+```
+
+`path` - _Type_: string; _Default_: `<rootPath>/database`
+
+The `path` configuration sets where all database files should reside.
+
+```yaml
+storage:
+  path: /users/harperdb/storage
+```
+
+_**Note:**_ This configuration applies to all database files, which includes the system tables that are used internally by Harper. For this reason, if you wish to use a non-default `path` value, you must move any existing schemas into your `path` location. Existing schemas likely include the system schema, which can be found at `<rootPath>/schema/system`.
+
+`blobPaths` - _Type_: string | array; _Default_: `<rootPath>/blobs`
+
+The `blobPaths` configuration sets where all the blob files should reside. This can be an array of paths, and if there are multiple, the blobs will be distributed across the paths.
+
+```yaml
+storage:
+  blobPaths:
+    - /users/harperdb/big-storage
+```
+
+`pageSize` - _Type_: number; _Default_: the default page size of the OS
+
+Defines the page size of the database.
+
+```yaml
+storage:
+  pageSize: 4096
+```
+
+`reclamation`
+
+The reclamation section provides configuration for the reclamation process, which is responsible for reclaiming space when free space is low.
+For example:
+
+```yaml
+storage:
+  reclamation:
+    threshold: 0.4 # Start storage reclamation efforts when free space has reached 40% of the volume space (default)
+    interval: 1h # Reclamation will run every hour (default)
+    evictionFactor: 100000 # A factor used to determine how aggressively to evict cached entries (default)
+```
+
+***
+
+### `tls`
+
+This section defines the certificates, keys, and settings for Transport Layer Security (TLS) for HTTPS and TLS socket support. This is used for both the HTTP and MQTT protocols. The `tls` section can be a single object with the settings below, or it can be an array of objects, where each object is a separate TLS configuration. By using an array, the TLS configuration can be used to define multiple certificates for different domains/hosts (negotiated through SNI).
+
+```yaml
+tls:
+  certificate: ~/hdb/keys/certificate.pem
+  certificateAuthority: ~/hdb/keys/ca.pem
+  privateKey: ~/hdb/keys/privateKey.pem
+```
+
+`certificate` - _Type_: string; _Default_: `<rootPath>/keys/certificate.pem`
+
+Path to the certificate file.
+
+`certificateAuthority` - _Type_: string; _Default_: `<rootPath>/keys/ca.pem`
+
+Path to the certificate authority file.
+
+`privateKey` - _Type_: string; _Default_: `<rootPath>/keys/privateKey.pem`
+
+Path to the private key file.
+
+`ciphers` - _Type_: string;
+
+Allows specific ciphers to be set.
+
+If you want to define multiple certificates that are applied based on the domain/host requested via SNI, you can define an array of TLS configurations. Each configuration can have the same properties as the root TLS configuration, but can (optionally) also have an additional `host` property to specify the domain/host that the certificate should be used for:
+
+```yaml
+tls:
+  - certificate: ~/hdb/keys/certificate1.pem
+    certificateAuthority: ~/hdb/keys/ca1.pem
+    privateKey: ~/hdb/keys/privateKey1.pem
+    host: example.com # the host is optional, and if not provided, this certificate's common name will be used as the host name.
+  - certificate: ~/hdb/keys/certificate2.pem
+    certificateAuthority: ~/hdb/keys/ca2.pem
+    privateKey: ~/hdb/keys/privateKey2.pem
+```
+
+Note that a `tls` section can also be defined in the `operationsApi` section, which will override the root `tls` section for the operations API.
+
+***
+
+### `mqtt`
+
+The MQTT protocol can be configured in this section.
+
+```yaml
+mqtt:
+  network:
+    port: 1883
+    securePort: 8883
+    mtls: false
+    webSocket: true
+    requireAuthentication: true
+```
+
+`port` - _Type_: number; _Default_: 1883
+
+This is the port to use for listening for insecure MQTT connections.
+
+`securePort` - _Type_: number; _Default_: 8883
+
+This is the port to use for listening for secure MQTT connections. This will use the `tls` configuration for certificates.
+
+`webSocket` - _Type_: boolean; _Default_: true
+
+This enables access to MQTT through WebSockets. This will handle WebSocket connections on the HTTP port (defaults to 9926) that have specified a (sub)protocol of `mqtt`.
+
+`requireAuthentication` - _Type_: boolean; _Default_: true
+
+This indicates whether authentication should be required for establishing an MQTT connection (whether through MQTT connection credentials or mTLS). Disabling this allows unauthenticated connections, which are then subject to authorization for publishing and subscribing (and by default tables/resources do not authorize such access, but that can be enabled at the resource level).
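+
+For example, to accept unauthenticated MQTT connections (a minimal sketch; such connections remain subject to the resource-level authorization described above):
+
+```yaml
+mqtt:
+  network:
+    requireAuthentication: false
+```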
+
+`mtls` - _Type_: boolean | object; _Default_: false
+
+This can be configured to enable mTLS-based authentication for incoming connections. If enabled with default options (by setting it to `true`), the client certificate will be checked against the certificate authority specified in the `tls` section. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default.
+
+You can also define specific mTLS options by specifying an object for `mtls` with the following (optional) properties:
+
+`user` - _Type_: string; _Default_: Common Name
+
+This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication.
+
+`required` - _Type_: boolean; _Default_: false
+
+This can be enabled to require client certificates (mTLS) for all incoming MQTT connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication.
+
+`certificateAuthority` - _Type_: string; _Default_: Path from `tls.certificateAuthority`
+
+This can define a specific path to use for the certificate authority. By default, certificate authorization checks against the CA specified at `tls.certificateAuthority`, but if you need a specific/distinct CA for MQTT, you can set this.
+
+For example, you could specify that mTLS is required and will authenticate as "user-name":
+
+```yaml
+mqtt:
+  network:
+    mtls:
+      user: user-name
+      required: true
+```
+
+***
+
+### `databases`
+
+The `databases` section is an optional configuration that can be used to define where database files should reside, down to the table level. This configuration should be set before the database and table have been created. The configuration will not create the directories in the path; that must be done by the user.
+
+To define where a database and all its tables should reside, use the name of your database and the `path` parameter.
+
+```yaml
+databases:
+  nameOfDatabase:
+    path: /path/to/database
+```
+
+To define where specific tables within a database should reside, use the name of your database, the `tables` parameter, the name of your table and the `path` parameter.
+
+```yaml
+databases:
+  nameOfDatabase:
+    tables:
+      nameOfTable:
+        path: /path/to/table
+```
+
+This same pattern can be used to define where the audit log database files should reside. To do this, use the `auditPath` parameter.
+
+```yaml
+databases:
+  nameOfDatabase:
+    auditPath: /path/to/database
+```
+
+**Setting the databases section through the command line, environment variables or API**
+
+When using command line variables, environment variables or the API to configure the `databases` section, a slightly different convention from the regular one should be used. To add one or more configurations, use a JSON object array.
+
+Using command line variables:
+
+```bash
+--DATABASES [{\"nameOfDatabase\":{\"tables\":{\"nameOfTable\":{\"path\":\"\/path\/to\/table\"}}}}]
+```
+
+Using environment variables:
+
+```bash
+DATABASES=[{"nameOfDatabase":{"tables":{"nameOfTable":{"path":"/path/to/table"}}}}]
+```
+
+Using the API:
+
+```json
+{
+  "operation": "set_configuration",
+  "databases": [{
+    "nameOfDatabase": {
+      "tables": {
+        "nameOfTable": {
+          "path": "/path/to/table"
+        }
+      }
+    }
+  }]
+}
+```
+
+***
+
+### Components
+
+`<name>` - _Type_: string
+
+The name of the component. This will be used to name the folder where the component is installed and must be unique.
+
+`package` - _Type_: string
+
+A reference to your [component](../developers/components/managing#adding-components-to-root) package. This could be a remote git repo, a local folder/file or an NPM package. Harper will add this package to a `package.json` file and call `npm install` on it, so any reference that works with that paradigm will work here.
+
+Read more about npm install [here](https://docs.npmjs.com/cli/v8/commands/npm-install).
+
+`port` - _Type_: number; _Default_: whatever is set in `http.port`
+
+The port that your component should listen on. If no port is provided it will default to `http.port`.
+
+```yaml
+<name>:
+  package: 'HarperDB-Add-Ons/package-name'
+  port: 4321
+```
diff --git a/site/versioned_docs/version-4.5/deployments/harper-cli.md b/site/versioned_docs/version-4.5/deployments/harper-cli.md
new file mode 100644
index 00000000..91240516
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/harper-cli.md
@@ -0,0 +1,194 @@
+---
+title: Harper CLI
+---
+
+# Harper CLI
+
+## Harper CLI
+
+The Harper command line interface (CLI) is used to administer [self-installed Harper instances](./install-harper/).
+
+### Installing Harper
+
+To install Harper with CLI prompts, run the following command:
+
+```bash
+harperdb install
+```
+
+Alternatively, Harper installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](./configuration#using-the-configuration-file-and-naming-conventions). Note, when used in conjunction, command line arguments will override environment variables.
+
+**Environment Variables**
+
+```bash
+#minimum required parameters for no additional CLI prompts
+export TC_AGREEMENT=yes
+export HDB_ADMIN_USERNAME=HDB_ADMIN
+export HDB_ADMIN_PASSWORD=password
+export ROOTPATH=/tmp/hdb/
+export OPERATIONSAPI_NETWORK_PORT=9925
+harperdb install
+```
+
+**Command Line Arguments**
+
+```bash
+#minimum required parameters for no additional CLI prompts
+harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925
+```
+
+***
+
+### Starting Harper
+
+To start Harper after it is installed, run the following command:
+
+```bash
+harperdb start
+```
+
+***
+
+### Stopping Harper
+
+To stop Harper once it is running, run the following command:
+
+```bash
+harperdb stop
+```
+
+***
+
+### Restarting Harper
+
+To restart Harper once it is running, run the following command:
+
+```bash
+harperdb restart
+```
+
+***
+
+### Getting the Harper Version
+
+To check the version of Harper that is installed, run the following command:
+
+```bash
+harperdb version
+```
+
+***
+
+### Renew self-signed certificates
+
+To renew the Harper generated self-signed certificates, run:
+
+```bash
+harperdb renew-certs
+```
+
+***
+
+### Copy a database with compaction
+
+To copy a Harper database with compaction (to eliminate free space and fragmentation), use:
+
+```bash
+harperdb copy-db <source-database> <target-file-path>
+```
+
+For example, to copy the default database:
+
+```bash
+harperdb copy-db data /home/user/hdb/database/copy.mdb
+```
+
+***
+
+### Get all available CLI commands
+
+To display all available Harper CLI commands along with a brief description, run:
+
+```bash
+harperdb help
+```
+
+***
+
+### Get the status of Harper and clustering
+
+To display the status of the Harper process, the clustering hub and leaf processes, the clustering network, and replication statuses, run:
+
+```bash
+harperdb status
+```
+
+***
+
+### Backups
+
+Harper uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that Harper maintains database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a Harper database. Database files are stored in the hdb/database directory. As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the database directory to restore a previous backup (with Harper shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot; this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use.
+
+***
+
+## Operations API through the CLI
+
+Some of the API operations are available through the CLI; this includes most operations that do not require nested parameters. To call an operation, use the following convention: `harperdb <operation> <parameter>=<value>`. By default, the result will be formatted as YAML; if you would like the result in JSON, pass `json=true`.
+
+Some examples are:
+
+```bash
+$ harperdb describe_table database=dev table=dog
+
+schema: dev
+name: dog
+hash_attribute: id
+audit: true
+schema_defined: false
+attributes:
+  - attribute: id
+    is_primary_key: true
+  - attribute: name
+    indexed: true
+clustering_stream_name: 3307bb542e0081253klnfd3f1cf551b
+record_count: 10
+last_updated_record: 1724483231970.9949
+```
+
+`harperdb set_configuration logging_level=error`
+
+`harperdb deploy_component project=my-cool-app package=https://github.com/HarperDB/application-template`
+
+`harperdb get_components`
+
+`harperdb search_by_id database=dev table=dog ids='["1"]' get_attributes='["*"]' json=true`
+
+`harperdb search_by_value table=dog search_attribute=name search_value=harper get_attributes='["id", "name"]'`
+
+`harperdb sql sql='select * from dev.dog where id="1"'`
+
+### Remote Operations
+
+The CLI can also be used to run operations on remote Harper instances. To do this, pass the `target` parameter with the HTTP address of the remote instance. You will generally also need to provide credentials, specifying the `username` and `password` parameters, or you can set the environment variables `CLI_TARGET_USERNAME` and `CLI_TARGET_PASSWORD`, for example:
+
+```bash
+export CLI_TARGET_USERNAME=HDB_ADMIN
+export CLI_TARGET_PASSWORD=password
+harperdb describe_database database=dev target=https://server.com:9925
+```
+
+The same set of API operations is available for remote operations as well.
+
+#### Remote Component Deployment
+
+When using remote operations, you can deploy a local component to the remote instance. If you omit the `package` parameter, you can deploy the current directory. This will package the current directory and send it to the target server (`deploy` is also allowed as an alias for `deploy_component`):
+
+```bash
+harperdb deploy target=https://server.com:9925
+```
+
+If you are interacting with a cluster, you may wish to include the `replicated=true` parameter to ensure that the deployment operation is replicated to all nodes in the cluster. You will also need to restart afterwards to apply the changes (here shown with the replicated parameter):
+
+```bash
+harperdb restart target=https://server.com:9925 replicated=true
+```
diff --git a/site/versioned_docs/version-4.5/deployments/harper-cloud/alarms.md b/site/versioned_docs/version-4.5/deployments/harper-cloud/alarms.md
new file mode 100644
index 00000000..72b4e7a7
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/harper-cloud/alarms.md
@@ -0,0 +1,20 @@
+---
+title: Alarms
+---
+
+# Alarms
+
+Harper Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered, organization owners will immediately receive an email alert and the alert will be available on the [Instance Configuration](../../administration/harper-studio/instance-configuration) page. The table below describes each alert and its evaluation metrics.
+
+### Heading Definitions
+
+* **Alarm**: Title of the alarm.
+* **Threshold**: Definition of the alarm threshold.
+* **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over.
+* **Proposed Remedy**: Recommended solution to avoid the alert in the future.
+
+| Alarm   | Threshold  | Intervals | Proposed Remedy |
+| ------- | ---------- | --------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| Storage | > 90% Disk | 1 x 5min  | [Increase storage volume](../../administration/harper-studio/instance-configuration#update-instance-storage) |
+| CPU     | > 90% Avg  | 2 x 5min  | [Increase instance size for additional CPUs](../../administration/harper-studio/instance-configuration#update-instance-ram) |
+| Memory  | > 90% RAM  | 2 x 5min  | [Increase instance size](../../administration/harper-studio/instance-configuration#update-instance-ram) |
diff --git a/site/versioned_docs/version-4.5/deployments/harper-cloud/index.md b/site/versioned_docs/version-4.5/deployments/harper-cloud/index.md
new file mode 100644
index 00000000..fbf2d81e
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/harper-cloud/index.md
@@ -0,0 +1,9 @@
+---
+title: Harper Cloud
+---
+
+# Harper Cloud
+
+[Harper Cloud](https://studio.harperdb.io/) is the easiest way to test drive Harper; it’s Harper-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. Harper Cloud is currently powered by AWS, with additional cloud providers on our roadmap for the future.
+
+You can create a new Harper Cloud instance in the Harper Studio.
diff --git a/site/versioned_docs/version-4.5/deployments/harper-cloud/instance-size-hardware-specs.md b/site/versioned_docs/version-4.5/deployments/harper-cloud/instance-size-hardware-specs.md
new file mode 100644
index 00000000..72979d8d
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/harper-cloud/instance-size-hardware-specs.md
@@ -0,0 +1,23 @@
+---
+title: Instance Size Hardware Specs
+---
+
+# Instance Size Hardware Specs
+
+While Harper Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.
+
+| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor |
+| --------------------- | --------- | ------- | -------------- | -------------------------------------- |
+| t3.micro | 1 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 |
+| t3.small | 2 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 |
+| t3.medium | 4 | 2 | Up to 5 | 2.5 GHz Intel Xeon Platinum 8000 |
+| m5.large | 8 | 2 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.xlarge | 16 | 4 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.2xlarge | 32 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.4xlarge | 64 | 16 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.8xlarge | 128 | 32 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.12xlarge | 192 | 48 | 10 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.16xlarge | 256 | 64 | 20 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+| m5.24xlarge | 384 | 96 | 25 | Up to 3.1 GHz Intel Xeon Platinum 8000 |
+
+\*Specifications are subject to change. For the most up to date information, please refer to AWS documentation: [https://aws.amazon.com/ec2/instance-types/](https://aws.amazon.com/ec2/instance-types/).
diff --git a/site/versioned_docs/version-4.5/deployments/harper-cloud/iops-impact.md b/site/versioned_docs/version-4.5/deployments/harper-cloud/iops-impact.md
new file mode 100644
index 00000000..f316fc30
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/harper-cloud/iops-impact.md
@@ -0,0 +1,42 @@
+---
+title: IOPS Impact on Performance
+---
+
+# IOPS Impact on Performance
+
+Harper, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of a server, virtual machine, or container running Harper. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that Harper performs as expected.
+
+## IOPS Overview
+
+The primary measure of storage performance is the number of input/output operations per second (IOPS) that a storage device can perform. Different storage devices can have dramatically different performance profiles. A hard drive (HDD) might only perform a hundred or so IOPS, while a solid state drive (SSD) might be able to perform tens or hundreds of thousands of IOPS.
+
+Cloud providers like AWS, which powers Harper Cloud, don’t typically attach individual disks to a virtual machine or container. Instead, they combine large numbers of storage drives to create very high performance storage servers. Chunks (volumes) of that storage are then carved out and presented to many different virtual machines and containers. Due to the shared nature of this type of storage, the cloud provider places configurable limits on the number of IOPS that a volume can perform. The same way that cloud providers charge more for larger capacity volumes, they also charge more for volumes with more IOPS.
+
+## Harper Cloud Storage
+
+Harper Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.
+
+AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS; as a result, all Harper Cloud storage options offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future.
+
+You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html.
+
+## Estimating IOPS for a Harper Instance
+
+The number of IOPS required for a particular workload is influenced by many factors. Testing your particular application is the best way to determine the number of IOPS required. A reliable method is to estimate about two IOPS for every index, including the primary key itself. So if a table has two indices besides the primary key, estimate that an insert or update will require about six IOPS. Note that this can often be closer to one IOPS per index under load, due to internal batching of writes, and sometimes even better when doing sequential inserts. Again, it is best to verify this by testing with application-specific data and write patterns.
+
+For assistance in estimating IOPS requirements, feel free to contact Harper Support or join our Community Slack Channel.
+
+## Example Use Case IOPS Requirements
+
+* **Sensor Data Collection**
+
+  In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level.
+  For example, if you are collecting 100 records per second, you would expect to need roughly 3,000 IOPS just to handle the data inserts.
+* **Data Analytics/BI Server**
+
+  Providing a server for analytics purposes typically requires a larger machine. Typically these cases involve large scale SQL joins and aggregations, which puts a large strain on reads. Harper utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs to have high IOPS to meet performance demand.
+* **Web Services**
+
+  Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any Harper operation that requires a data scan will be IOPS intensive, but if these are not frequent then the EBS burst will suffice. Queries utilizing equals operations in either SQL or NoSQL do not require a scan due to Harper’s native indexing.
+* **High Performance Database**
+
+  Ultimately, if performance is your top priority, Harper should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements.
diff --git a/site/versioned_docs/version-4.5/deployments/harper-cloud/verizon-5g-wavelength-instances.md b/site/versioned_docs/version-4.5/deployments/harper-cloud/verizon-5g-wavelength-instances.md
new file mode 100644
index 00000000..1589acc3
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/harper-cloud/verizon-5g-wavelength-instances.md
@@ -0,0 +1,31 @@
+---
+title: Verizon 5G Wavelength
+---
+
+# Verizon 5G Wavelength
+
+These instances are only accessible from the Verizon network. When accessing your Harper instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, or Verizon mobile devices.
+
+Harper on Verizon 5G Wavelength brings Harper closer to the end user, exclusively on the Verizon network, resulting in as little as single-digit millisecond response time from Harper to the client.
+
+Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/).
+
+## Harper 5G Wavelength Instance Specs
+
+While Harper 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.
+
+| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor |
+| --------------------- | --------- | ------- | -------------- | ------------------------------------------- |
+| t3.medium | 4 | 2 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor |
+| t3.xlarge | 16 | 4 | Up to 5 | Up to 3.1 GHz Intel Xeon Platinum Processor |
+| r5.2xlarge | 64 | 8 | Up to 10 | Up to 3.1 GHz Intel Xeon Platinum Processor |
+
+\*Specifications are subject to change. For the most up to date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/).
+
+## Harper 5G Wavelength Storage
+
+Harper 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp2) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.
+
+AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS a volume can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time.
+
+Smaller gp2 volumes are perfect for trying out the functionality of Harper, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger Harper volume. Learn more about the [impact of IOPS on performance here](./iops-impact).
+
+You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#ebsvolumetypes\_gp2).
diff --git a/site/versioned_docs/version-4.5/deployments/install-harper/index.md b/site/versioned_docs/version-4.5/deployments/install-harper/index.md
new file mode 100644
index 00000000..99335044
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/install-harper/index.md
@@ -0,0 +1,61 @@
+---
+title: Install Harper
+---
+
+# Install Harper
+
+## Install Harper
+
+This documentation contains information for installing Harper locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with Harper Cloud](https://studio.harperdb.io/sign-up). Harper is a cross-platform database; we recommend Linux for production use, but Harper can run on Windows and Mac as well for development purposes. Installation is usually very simple and just takes a few steps, but there are a few different options documented here.
+
+Harper runs on Node.js, so if you do not have it installed, you need to do that first (if you already have it installed, you can skip ahead to installing Harper itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally, NVM can be installed with the following command:
+
+```bash
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash
+```
+
+Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but all currently maintained Node versions are supported (currently version 14 and newer; make sure to always use the latest minor/patch for the major version):
+
+```bash
+nvm install --lts
+```
+
+#### Install and Start Harper
+
+Then you can install Harper with NPM and start it:
+
+```bash
+npm install -g harperdb
+harperdb
+```
+
+Harper will automatically start after installation. Harper's installation can be configured with numerous options via CLI arguments; for more information, visit the [Harper Command Line Interface](../harper-cli) guide.
+
+If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system to use as a database server in our Linux installation guide](./linux).
+
+## With Docker
+
+If you would like to run Harper in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server.
+
+Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a Harper container.
+
+## Offline Install
+
+If you need to install Harper on a device that doesn't have an Internet connection, you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and NPM):
+
+[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html)
+
+Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it:
+
+```bash
+npm install -g harperdb-X.X.X.tgz
+harperdb install
+```
+
+## Installation on Less Common Platforms
+
+Harper comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including:
+
+* [Go](https://go.dev/dl/): version 1.19.1
+* GCC
+* Make
+* Python v3.7, v3.8, v3.9, or v3.10
diff --git a/site/versioned_docs/version-4.5/deployments/install-harper/linux.md b/site/versioned_docs/version-4.5/deployments/install-harper/linux.md
new file mode 100644
index 00000000..cece27b9
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/install-harper/linux.md
@@ -0,0 +1,225 @@
+---
+title: On Linux
+---
+
+# On Linux
+
+If you wish to install locally or already have a configured server, see the basic [Installation Guide](./).
+
+The following is a recommended way to configure Linux and install Harper. These instructions should work reasonably well for any public cloud or on-premises Linux instance.
+
+***
+
+These instructions assume that the following has already been completed:
+
+1. Linux is installed
+1. Basic networking is configured
+1. A non-root user account dedicated to Harper with sudo privileges exists
+1. An additional volume for storing Harper files is attached to the Linux instance
+1. Traffic to ports 9925 (Harper Operations API), 9926 (Harper Application Interface), and 9932 (Harper Clustering) is permitted
+
+While you will need to access Harper through port 9925 for administration through the operations API, and port 9932 for clustering, for a higher level of security you may want to consider keeping both of these ports restricted to a VPN or VPC, and only have the application interface (9926 by default) exposed to the public Internet.
+
+For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 Instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account.
+
+***
+
+### (Optional) LVM Configuration
+
+Logical Volume Manager (LVM) can be used to stripe multiple disks together to form a single logical volume. If striping disks together is not a requirement, skip these steps.
+
+Find the disk that already has a partition:
+
+```bash
+used_disk=$(lsblk -P -I 259 | grep "nvme.n1.*part" | grep -o "nvme.n1")
+```
+
+Create an array of free disks:
+
+```bash
+declare -a free_disks
+mapfile -t free_disks < <(lsblk -P -I 259 | grep "nvme.n1.*disk" | grep -o "nvme.n1" | grep -v "$used_disk")
+```
+
+Get the quantity of free disks:
+
+```bash
+free_disks_qty=${#free_disks[@]}
+```
+
+Construct the pvcreate command:
+
+```bash
+cmd_string=""
+for i in "${free_disks[@]}"
+do
+  cmd_string="$cmd_string /dev/$i"
+done
+```
+
+Initialize the disks for use by LVM:
+
+```bash
+pvcreate_cmd="pvcreate $cmd_string"
+sudo $pvcreate_cmd
+```
+
+Create the volume group:
+
+```bash
+vgcreate_cmd="vgcreate hdb_vg $cmd_string"
+sudo $vgcreate_cmd
+```
+
+Create the logical volume:
+
+```bash
+sudo lvcreate -n hdb_lv -i $free_disks_qty -l 100%FREE hdb_vg
+```
+
+### Configure Data Volume
+
+Run `lsblk` and note the device name of the additional volume:
+
+```bash
+lsblk
+```
+
+Create an ext4 filesystem on the volume (the below commands assume the device name is nvme1n1; if you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb\_vg/hdb\_lv):
+
+```bash
+sudo mkfs.ext4 -L hdb_data /dev/nvme1n1
+```
+
+Mount the file system and set the correct permissions for the directory:
+
+```bash
+mkdir /home/ubuntu/hdb
+sudo mount -t ext4 /dev/nvme1n1 /home/ubuntu/hdb
+sudo chown -R ubuntu:ubuntu /home/ubuntu/hdb
+sudo chmod 775 /home/ubuntu/hdb
+```
+
+Create an fstab entry to mount the filesystem on boot:
+
+```bash
+echo "LABEL=hdb_data /home/ubuntu/hdb ext4 defaults,noatime 0 1" | sudo tee -a /etc/fstab
+```
+
+### Configure Linux and Install Prerequisites
+
+If a swap file or partition does not already exist, create and enable a 2GB swap file:
+
+```bash
+sudo dd if=/dev/zero of=/swapfile bs=128M count=16
+sudo chmod 600 /swapfile
+sudo mkswap /swapfile
+sudo swapon /swapfile
+echo "/swapfile swap swap defaults 0 0" | sudo tee -a /etc/fstab
+```
+
+Increase the open file limits for the ubuntu user:
+
+```bash
+echo "ubuntu soft nofile 500000" | sudo tee -a /etc/security/limits.conf
+echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf
+```
+
+Install Node Version Manager (nvm):
+
+```bash
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash
+```
+
+Load nvm (or log out and then log back in):
+
+```bash
+. ~/.nvm/nvm.sh
+```
+
+Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites)):
+
+```bash
+nvm install
+```
+
+### Install and Start Harper
+
+Here is an example of installing Harper with minimal configuration:
+
+```bash
+npm install -g harperdb
+harperdb start \
+  --TC_AGREEMENT "yes" \
+  --ROOTPATH "/home/ubuntu/hdb" \
+  --OPERATIONSAPI_NETWORK_PORT "9925" \
+  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
+  --HDB_ADMIN_PASSWORD "password"
+```
+
+Here is an example of installing Harper with commonly used additional configuration.
+
+```bash
+npm install -g harperdb
+harperdb start \
+  --TC_AGREEMENT "yes" \
+  --ROOTPATH "/home/ubuntu/hdb" \
+  --OPERATIONSAPI_NETWORK_PORT "9925" \
+  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
+  --HDB_ADMIN_PASSWORD "password" \
+  --HTTP_SECUREPORT "9926" \
+  --CLUSTERING_ENABLED "true" \
+  --CLUSTERING_USER "cluster_user" \
+  --CLUSTERING_PASSWORD "password" \
+  --CLUSTERING_NODENAME "hdb1"
+```
+
+You can also use a custom configuration file to set values on install. To do so, set the CLI/ENV variable `HDB_CONFIG` to the path of your [custom configuration file](../configuration):
+
+```bash
+npm install -g harperdb
+harperdb start \
+  --TC_AGREEMENT "yes" \
+  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
+  --HDB_ADMIN_PASSWORD "password" \
+  --HDB_CONFIG "/path/to/your/custom/harperdb-config.yaml"
+```
+
+#### Start Harper on Boot
+
+Harper will automatically start after installation. If you wish Harper to start when the OS boots, you have two options:
+
+You can set up a crontab:
+
+```bash
+(crontab -l 2>/dev/null; echo "@reboot PATH=\"/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH\" && harperdb start") | crontab -
+```
+
+Or you can create a systemd service file at `/etc/systemd/system/harperdb.service`, pasting the following contents into the file:
+
+```
+[Unit]
+Description=Harper
+
+[Service]
+Type=simple
+Restart=always
+User=ubuntu
+Group=ubuntu
+WorkingDirectory=/home/ubuntu
+ExecStart=/bin/bash -c 'PATH="/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH"; harperdb'
+
+[Install]
+WantedBy=multi-user.target
+```
+
+And then run the following:
+
+```
+systemctl daemon-reload
+systemctl enable harperdb
+```
+
+For more information visit the [Harper Command Line Interface guide](../harper-cli) and the [Harper Configuration File guide](../configuration).
diff --git a/site/versioned_docs/version-4.5/deployments/upgrade-hdb-instance.md b/site/versioned_docs/version-4.5/deployments/upgrade-hdb-instance.md
new file mode 100644
index 00000000..f5f403e7
--- /dev/null
+++ b/site/versioned_docs/version-4.5/deployments/upgrade-hdb-instance.md
@@ -0,0 +1,139 @@
+---
+title: Upgrade a Harper Instance
+---
+
+# Upgrade a Harper Instance
+
+This document describes best practices for upgrading self-hosted Harper instances. Harper can be upgraded using a combination of npm and built-in Harper upgrade scripts. Whenever upgrading your Harper installation, it is recommended that you make a backup of your data first. Note: This document applies to self-hosted Harper instances only. All [Harper Cloud instances](./harper-cloud/) will be upgraded by the Harper Cloud team.
+
+## Upgrading
+
+Upgrading Harper is a two-step process. First, the latest version of Harper must be downloaded from npm; then the Harper upgrade scripts will be utilized to ensure the newest features are available on the system.
+
+1. Install the latest version of Harper using `npm install -g harperdb`.
+
+   Note `-g` should only be used if you installed Harper globally (which is recommended).
+1. Run `harperdb` to initiate the upgrade process.
+
+   Harper will then prompt you for all appropriate inputs and then run the upgrade directives.
+
+## Node Version Manager (nvm)
+
+[Node Version Manager (nvm)](https://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found at https://nvm.sh/.
+
+Harper supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command:
+
+```bash
+nvm install <version>
+```
+
+To switch to a version of Node run:
+
+```bash
+nvm use <version>
+```
+
+To see the current running version of Node run:
+
+```bash
+node --version
+```
+
+With a handful of different versions of Node.js installed, run nvm with the `ls` argument to list out all installed versions:
+
+```bash
+nvm ls
+```
+
+When upgrading Harper, we recommend also upgrading your Node version. Here we assume you're running on an older version of Node; the execution may look like this:
+
+Switch to the older version of Node that Harper is running on (if it is not the current version):
+
+```bash
+nvm use 14.19.0
+```
+
+Make sure Harper is not running:
+
+```bash
+harperdb stop
+```
+
+Uninstall Harper. Note: this step is not required, but it will clean up old artifacts of Harper. We recommend removing all other Harper installations to ensure the most recent version is always running.
+
+```bash
+npm uninstall -g harperdb
+```
+
+Switch to the newer version of Node:
+
+```bash
+nvm use <version>
+```
+
+Install Harper globally:
+
+```bash
+npm install -g harperdb
+```
+
+Run the upgrade script:
+
+```bash
+harperdb
+```
+
+Start Harper:
+
+```bash
+harperdb start
+```
+
+***
+
+## Upgrading NATS to Plexus 4.4
+
+To upgrade from NATS clustering to Plexus replication, follow these manual steps. They are designed for a fully replicating cluster to ensure minimal disruption during the upgrade process.
+
+The core of this upgrade is the _bridge node_. This node will run both NATS and Plexus simultaneously, ensuring that transactions are relayed between the two systems during the transition. The bridge node is crucial in preventing any replication downtime, as it will handle transactions from NATS nodes to Plexus nodes and vice versa.
+
+### Enabling Plexus
+
+To enable Plexus on a node that is already running NATS, you will need to update [two values](./configuration) in the `harperdb-config.yaml` file:
+
+```yaml
+replication:
+  url: wss://my-cluster-node-1:9925
+  hostname: node-1
+```
+
+`replication.url` – This should be set to the URL of the current Harper instance.
+
+`replication.hostname` – Since we are upgrading from NATS, this value should match the `clustering.nodeName` of the current instance.
+
+### Upgrade Steps
+
+1. Set up the bridge node:
+   * Choose one node to be the bridge node.
+   * On this node, follow the "Enabling Plexus" steps from the previous section, but **do not disable NATS clustering on this instance.**
+   * Stop the instance and perform the upgrade.
+   * Start the instance. This node should now be running both Plexus and NATS.
+1. Upgrade a node:
+   * Choose a node that needs upgrading and enable Plexus by following the "Enabling Plexus" steps.
+   * Disable NATS by setting `clustering.enabled` to `false`.
+   * Stop the instance and upgrade it.
+   * Start the instance.
+   * Call [`add_node`](../developers/operations-api/clustering#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node.
+     _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering#add-node)_._
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "node-1",
+  "url": "wss://my-cluster-node-1:9925"
+}
+```
+
+1. Repeat Step 2 on all remaining nodes that need to be upgraded.
+1. Disable NATS on the bridge node by setting `clustering.enabled` to `false` and restart the instance.
+
+Your cluster upgrade should now be complete, with no NATS processes running on any of the nodes.
diff --git a/site/versioned_docs/version-4.5/developers/_category_.json b/site/versioned_docs/version-4.5/developers/_category_.json
new file mode 100644
index 00000000..9fe399bf
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Developers",
+  "position": 1,
+  "link": {
+    "type": "generated-index",
+    "title": "Developers Documentation",
+    "description": "Comprehensive guides and references for building applications with HarperDB",
+    "keywords": [
+      "developers"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/applications/caching.md b/site/versioned_docs/version-4.5/developers/applications/caching.md
new file mode 100644
index 00000000..e111ac5b
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/caching.md
@@ -0,0 +1,288 @@
+---
+title: Caching
+---
+
+# Caching
+
+Harper has integrated support for caching data from external sources. With built-in caching capabilities and distributed, high-performance, low-latency responsiveness, Harper makes an ideal data caching server. Harper can store cached data in standard tables, as queryable structured data, so data can easily be consumed in one format (for example, JSON or CSV) and provided to end users in different formats with different selected properties (for example, MessagePack, with a subset of selected properties), or even with customized querying capabilities. Harper also manages and provides timestamps/tags for proper caching control, facilitating further downstream caching. With these combined capabilities, Harper is an extremely fast, interoperable, flexible, and customizable caching server.
+
+## Configuring Caching
+
+To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](./defining-schemas)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`:
+
+```graphql
+type MyCache @table(expiration: 3600) @export {
+  id: ID @primaryKey
+}
+```
+
+You may also note that we can define a time-to-live (TTL) expiration on the table, indicating when table records/entries should expire and be evicted from this table. This is generally necessary for "passive" caches where there is no active notification of when entries expire. However, this is not needed if you provide a means of notifying when data is invalidated and changed. The units for expiration, and other duration-based properties, are in seconds.
+
+While you can provide a single expiration time, there are actually several expiration timings that are potentially relevant, and can be independently configured.
+These settings are available as directive properties on the table configuration (like `expiration` above):
+
+* stale expiration: The point when a request for a record should trigger a request to origin (but might possibly return the current stale record, depending on policy).
+* must-revalidate expiration: The point when a request for a record must make a request to origin first and return the latest value from origin.
+* eviction expiration: The point when a record is actually removed from the caching table.
+
+You can provide a single expiration and it defines the behavior for all three. You can also provide three settings for expiration, through table directives:
+
+* expiration - The amount of time until a record goes stale.
+* eviction - The amount of time after expiration before a record can be evicted (defaults to zero).
+* scanInterval - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction).
+
+## Define External Data Source
+
+Next, you need to define the source for your cache. External data sources could be HTTP APIs, other databases, microservices, or any other source of data. This can be defined as a resource class in your application's `resources.js` module. You can extend the `Resource` class (which is available as a global variable in the Harper environment) as your base class. The first method to implement is a `get()` method to define how to retrieve the source data. For example, if we were caching an external HTTP API, we might define it as such:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  async get() {
+    return (await fetch(`http://some-api.com/${this.getId()}`)).json();
+  }
+}
+```
+
+Next, we define this external data resource as the "source" for the caching table we defined above:
+
+```javascript
+const { MyCache } = tables;
+MyCache.sourcedFrom(ThirdPartyAPI);
+```
+
+Now we have a fully configured and connected caching table. If you access data from `MyCache` (for example, through the REST API, like `/MyCache/some-id`), Harper will check to see if the requested entry is in the table and return it if it is available (and hasn't expired). If there is no entry, or it has expired (it is older than one hour in this case), it will go to the source, calling the `get()` method, which will then retrieve the requested entry. Once the entry is retrieved, it will be saved/cached in the caching table (for one hour based on our expiration time).
+
+```mermaid
+flowchart TD
+    Client1(Client 1)-->Cache(Caching Table)
+    Client2(Client 2)-->Cache
+    Cache-->Resource(Data Source Connector)
+    Resource-->API(Remote Data Source API)
+```
+
+Harper handles waiting for an existing cache resolution to finish and uses its result. This prevents a "cache stampede" when entries expire, ensuring that multiple requests to a cache entry will all wait on a single request to the data source.
+
+Cache tables with an expiration are periodically pruned for expired entries. Because this is done periodically, there is usually some amount of time between when a record has expired and when the record is actually evicted (the cached data is removed). But when a record is checked for availability, the expiration time is used to determine if the record is fresh (and the cache entry can be used).
+
+### Eviction with Indexing
+
+Eviction is the removal of a locally cached copy of data, but it does not imply the deletion of the actual data from the canonical or origin data source.
+Because evicted records still exist (just not in the local cache), if a caching table uses expiration (and eviction) and has indexing on certain attributes, the data is not removed from the indexes. The indexes that reference the evicted record are preserved, along with the attribute data necessary to maintain these indexes. Therefore, eviction means the removal of non-indexed data (in this case evictions are stored as "partial" records). Eviction only removes the data that can be safely removed from a cache without affecting the integrity or behavior of the indexes. If a search query is performed that matches this evicted record, the record will be requested on-demand to fulfill the search query.
+
+### Specifying a Timestamp
+
+In the example above, we simply retrieved data to fulfill a cache request. We may want to supply the timestamp of the record we are fulfilling as well. This can be set on the context for the request:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  async get() {
+    let response = await fetch(`http://some-api.com/${this.getId()}`);
+    this.getContext().lastModified = response.headers.get('Last-Modified');
+    return response.json();
+  }
+}
+```
+
+#### Specifying an Expiration
+
+In addition, we can also specify when a cached record "expires". When a cached record expires, this means that a request for that record will trigger a request to the data source again. This does not necessarily mean that the cached record has been evicted (removed), although expired records will be periodically evicted. If the cached record still exists, the data source can revalidate it and return it. For example:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  async get() {
+    const context = this.getContext();
+    let headers = new Headers();
+    if (context.replacingVersion) // this is the existing cached record
+      headers.set('If-Modified-Since', new Date(context.replacingVersion).toUTCString());
+    let response = await fetch(`http://some-api.com/${this.getId()}`, { headers });
+    let cacheInfo = response.headers.get('Cache-Control');
+    let maxAge = cacheInfo?.match(/max-age=(\d+)/)?.[1];
+    if (maxAge) // we can set a specific expiration time by setting context.expiresAt
+      context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time
+    // we can just revalidate and return the record if the origin has confirmed that it has the same version:
+    if (response.status === 304) return context.replacingRecord;
+    ...
+```
+
+## Active Caching and Invalidation
+
+The cache we have created above is a "passive" cache; it only pulls data from the data source as needed, and has no knowledge of whether or when data from the data source has actually changed, so it must rely on timer-based expiration to periodically retrieve possibly updated data. This means that it is possible for the cache to have stale data for a while (if the underlying data has changed, but the cached data hasn't expired), and the cache may have to refresh more than necessary if the data source data hasn't changed. Consequently, it can be significantly more effective to implement an "active" cache, in which the data source is monitored and notifies the cache when any data changes. This ensures that when data changes, the cache can immediately load the updated data, and unchanged data can remain cached much longer (or indefinitely).
+
+### Invalidate
+
+One way to provide more active caching is to specifically invalidate individual records.
+Invalidation is useful when you know the source data has changed, and the cache needs to re-retrieve data from the source the next time that record is accessed. This can be done by executing the `invalidate()` method on a resource. For example, you could extend a table (in your resources.js) and provide a custom POST handler that does invalidation:
+
+```javascript
+const { MyTable } = tables;
+export class MyTableEndpoint extends MyTable {
+  async post(data) {
+    if (data.invalidate) // use this flag as a marker
+      this.invalidate();
+  }
+}
+```
+
+(Note that if you are now exporting this endpoint through resources.js, you don't necessarily need to directly export the table separately in your schema.graphql.)
+
+### Subscriptions
+
+We can provide more control of an active cache with subscriptions. If there is a way to receive notifications from the external data source of data changes, we can implement this data source as an "active" data source for our cache by implementing a `subscribe` method. A `subscribe` method should return an asynchronous iterable that iterates and returns events indicating the updates. One straightforward way of creating an asynchronous iterable is by defining the `subscribe` method as an asynchronous generator. If we had an endpoint that we could poll for changes every second, we could implement this like:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  async *subscribe() {
+    while (true) { // every second, retrieve more data
+      // get the next data change event from the source
+      let update = await (await fetch(`http://some-api.com/latest-update`)).json();
+      const event = { // define the change event (which will update the cache)
+        type: 'put', // this would indicate that the event includes the new data value
+        id: update.id, // the primary key of the record that updated (assuming the polled endpoint returns it)
+        value: update.value, // the new value of the record that updated
+        timestamp: update.timestamp, // the timestamp of when the data change occurred
+      };
+      yield event; // this returns this event, notifying the cache of the change
+      await new Promise((resolve) => setTimeout(resolve, 1000)); // wait a second before polling again
+    }
+  }
+  async get() {
+...
+```
+
+Notification events should always include an `id` property to indicate the primary key of the updated record. The event should have a `value` property for `put` and `message` event types. The `timestamp` is optional and can be used to indicate the exact timestamp of the change. The following event `type`s are supported:
+
+* `put` - This indicates that the record has been updated and provides the new value of the record.
+* `invalidate` - Alternatively, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
+* `delete` - This indicates that the record has been deleted.
+* `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time).
+* `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property.
+
+And the following properties can be defined on event objects:
+
+* `type`: The event type as described above.
+* `id`: The primary key of the record that updated.
+* `value`: The new value of the record that updated (for put and message).
+* `writes`: An array of event properties that are part of a transaction (used in conjunction with the transaction event type).
+* `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
+* `timestamp`: The timestamp of when the data change occurred.
+
+With an active external data source with a `subscribe` method, the data source will proactively notify the cache, ensuring a fresh and efficient active cache. Note that with an active data source, we still use the `sourcedFrom` method to register the source for a caching table, and the table will automatically detect and call the subscribe method on the data source.
+
+By default, Harper will only run the subscribe method on one thread. Harper is multi-threaded and normally runs many concurrent worker threads, but typically running a subscription on multiple threads can introduce overlap in notifications and race conditions, so running a subscription on a single thread is preferable. However, if you want to enable subscribe on multiple threads, you can define a `static subscribeOnThisThread` method to specify if the subscription should run on the current thread:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  static subscribeOnThisThread(threadIndex) {
+    return threadIndex < 2; // run on two threads (the first two threads)
+  }
+  async *subscribe() {
+    ....
+```
+
+An alternative to using asynchronous generators is to use a subscription stream and send events to it. A default subscription stream (that doesn't generate its own events) is available from the Resource's default subscribe method:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  subscribe() {
+    const subscription = super.subscribe();
+    setupListeningToRemoteService().on('update', (event) => {
+      subscription.send(event);
+    });
+    return subscription;
+  }
+}
+```
+
+## Downstream Caching
+
+It is highly recommended that you utilize the [REST interface](../rest) for accessing caching tables, as it facilitates downstream caching for clients. Timestamps are recorded with all cached entries. Timestamps are then used for incoming [REST requests to specify the `ETag` in the response](../rest#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data based on the timestamp/`ETag` of the entries that are cached in Harper. Caching tables also have [subscription capabilities](./caching#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of Harper, both as passive or active caches.
+
+## Write-Through Caching
+
+The cache we have defined so far only has data flowing from the data source to the cache. However, you may wish to support write methods, so that writes to the cache table can flow through to the underlying canonical data source, as well as populate the cache. This can be accomplished by implementing the standard write methods, like `put` and `delete`.
+If you were using an API with standard RESTful methods, you can pass writes through to the data source like this:
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  async put(data) {
+    await fetch(`http://some-api.com/${this.getId()}`, {
+      method: 'PUT',
+      body: JSON.stringify(data)
+    });
+  }
+  async delete() {
+    await fetch(`http://some-api.com/${this.getId()}`, {
+      method: 'DELETE',
+    });
+  }
+  ...
+```
+
+When doing an insert or update to the MyCache table, the data will be sent to the underlying data source through the `put` method, and the new record value will be stored in the cache as well.
+
+### Loading from Source in Methods
+
+When you are using a caching table, it is important to remember that any resource method besides `get()` will not automatically load data from the source. If you have defined a `put()`, `post()`, or `delete()` method and you need the source data, you can ensure it is loaded by calling the `ensureLoaded()` method. For example, if you want to modify the existing record from the source, adding a property to it:
+
+```javascript
+class MyCache extends tables.MyCache {
+  async post(data) {
+    // if the data is not cached locally, retrieves from source:
+    await this.ensureLoaded();
+    // now we can be sure that the data is loaded, and can access properties
+    this.quantity = this.quantity - data.purchases;
+  }
+}
+```
+
+### Subscribing to Caching Tables
+
+You can subscribe to a caching table just like any other table. The one difference is that normal tables do not usually have `invalidate` events, but an active caching table may have `invalidate` events. Again, this event type gives listeners an opportunity to choose whether or not to actually retrieve the value that changed.
+
+### Passive-Active Updates
+
+With our passive update examples, we have provided a data source handler with a `get()` method that returns the specific requested record as the response. However, we can also actively update other records in our response handler (if our data source provides data that should be propagated to other related records). This can be done transactionally, to ensure that all updates occur atomically. The context that is provided to the data source holds the transaction information, so we can simply pass the context to any update/write methods that we call. For example, let's say we are loading a blog post, which also includes comment records:
+
+```javascript
+const { Post, Comment } = tables;
+class BlogSource extends Resource {
+  async get() {
+    const post = await (await fetch(`http://my-blog-server/${this.getId()}`)).json();
+    for (let comment of post.comments) {
+      await Comment.put(comment, this); // save this comment as part of our current context and transaction
+    }
+    return post;
+  }
+}
+Post.sourcedFrom(BlogSource);
+```
+
+Here both the update to the post and the update to the comments will be atomically/transactionally committed together with the same timestamp.
+
+## Cache-Control header
+
+When interacting with cached data, you can also use the `Cache-Control` request header to specify certain caching behaviors. When performing a PUT (or POST) method, you can use the `max-age` directive to indicate how long the resource should be cached (until stale):
+
+```http
+PUT /my-resource/id
+Cache-Control: max-age=86400
+```
+
+You can use the `only-if-cached` directive on GET requests to only return a resource if it is cached (otherwise a 504 will be returned).
+Note that if the entry is not cached, this will still trigger a request for the source data from the data source. If you do not want source data retrieved, you can add the `no-store` directive. You can also use the `no-cache` directive if you do not want to use the cached resource. If you want to check whether there is a cached resource without triggering a request to the data source:
+
+```http
+GET /my-resource/id
+Cache-Control: only-if-cached, no-store
+```
+
+You may also use the `stale-if-error` directive to indicate that it is acceptable to return a stale cached resource when the data source returns an error (network connection error, 500, 502, 503, or 504). The `must-revalidate` directive can indicate that a stale cached resource cannot be returned, even when the data source has an error (by default, a stale cached resource is returned when there is a network connection error).
+
+## Caching Flow
+
+It may be helpful to understand the flow of a cache request. When a request is made to a caching table:
+
+* Harper will first create a resource instance to handle the process, and ensure that the data is loaded for the resource instance. To do this, it will first check if the record is in the table/cache.
+  * If the record is not in the cache, Harper will first check if there is a current request to get the record from the source. If there is, Harper will wait for that request to complete and return the record from the cache.
+    * If not, Harper will call the `get()` method on the source to retrieve the record. The record will then be stored in the cache.
+  * If the record is in the cache, Harper will check if the record is stale. If the record is not stale, Harper will immediately return the record from the cache. If the record is stale, Harper will call the `get()` method on the source to retrieve the record.
+    * The record will then be stored in the cache. This will write the record to the cache in a separate asynchronous/background write-behind transaction, so it does not block the current request, and the data is returned immediately once it is available.
+* The `get()` method will be called on the resource instance to return the record to the client (or perform any querying on the record). If this is overridden, the method will be called at this time.
+
+### Caching Flow with Write-Through
+
+When writes are performed on a caching table (in a `put()` or `post()` method, for example), the flow is slightly different:
+
+* Harper will have first created a resource instance to handle the process, and this resource instance will be the current `this` for a call to `put()` or `post()`.
+* If a `put()` or `update()` is called, for example, this action will be recorded in the current transaction.
+* Once the transaction is committed (which is done automatically as the request handler completes), the transaction writes will be sent to the source to update the data.
+  * The local writes will wait for the source to confirm the writes have completed (note that this effectively allows you to perform a two-phase transactional write to the source, and the source can confirm the writes have completed before the transaction is committed locally; see the sketch below).
+  * The transaction writes will then be written to the local caching table.
+* The transaction handler will wait for the local commit to be written, then the transaction will be resolved and a response will be sent to the client.
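+
+Because the local commit waits on the source, a write-through source can effectively veto a write by throwing an error. Below is a minimal sketch of that pattern, reusing the hypothetical `ThirdPartyAPI` source from the write-through example above (the error-handling policy shown is an assumption for illustration, not prescribed Harper behavior):
+
+```javascript
+class ThirdPartyAPI extends Resource {
+  async put(data) {
+    const response = await fetch(`http://some-api.com/${this.getId()}`, {
+      method: 'PUT',
+      body: JSON.stringify(data),
+    });
+    if (!response.ok) {
+      // throwing here aborts the local commit, so a value the origin
+      // rejected is never written to the local caching table
+      throw new Error(`Origin rejected write: ${response.status}`);
+    }
+  }
+}
+```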
diff --git a/site/versioned_docs/version-4.5/developers/applications/debugging.md b/site/versioned_docs/version-4.5/developers/applications/debugging.md
new file mode 100644
index 00000000..c7c085bf
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/debugging.md
@@ -0,0 +1,39 @@
+---
+title: Debugging Applications
+---
+
+# Debugging Applications
+
+Harper components and applications run inside the Harper process, which is a standard Node.js process that can be debugged with standard JavaScript development tools like Chrome's devtools, VSCode, and WebStorm. Debugging can be performed by launching the Harper entry script with your IDE, or you can start Harper in dev mode and connect your debugger to the running process (defaults to the standard 9229 port):
+
+```
+harperdb dev
+# or to run and debug a specific app
+harperdb dev /path/to/app
+```
+
+Once you have connected a debugger, you may set breakpoints in your application and fully debug it. Note that when using the `dev` command from the CLI, this will run Harper in single-threaded mode. This would not be appropriate for production use, but makes it easier to debug applications.
+
+For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use Harper's logging facilities, so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by Harper. This logger can be used to output messages directly to the Harper log using standardized logging level functions, described below. The log level can be set in the [Harper Configuration File](../../deployments/configuration).
+
+Harper Logger Functions
+
+* `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
+* `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
+* `info(message)`: Write an 'info' level log, if the configured level allows for it.
+* `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
+* `error(message)`: Write an 'error' level log, if the configured level allows for it.
+* `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
+* `notify(message)`: Write a 'notify' level log.
+
+For example, you can log a warning:
+
+```javascript
+logger.warn('You have been warned');
+```
+
+If you want to ensure a message is logged, you can use `notify`, as these messages will appear in the log regardless of the configured log level.
+
+## Viewing the Log
+
+The Harper Log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the Studio Status page. Additionally, you can use the [`read_log` operation](../operations-api/logs) to query the Harper log.
diff --git a/site/versioned_docs/version-4.5/developers/applications/define-routes.md b/site/versioned_docs/version-4.5/developers/applications/define-routes.md
new file mode 100644
index 00000000..9d3a1526
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/define-routes.md
@@ -0,0 +1,118 @@
+---
+title: Define Fastify Routes
+---
+
+# Define Fastify Routes
+
+Harper’s applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints.
+While we generally recommend building your endpoints/APIs with Harper's [REST interface](../rest) for better performance and standards compliance, Fastify's routes can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.
+
+The Fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):
+
+```yaml
+fastifyRoutes: # This loads files that define fastify routes using fastify's auto-loader
+  files: routes/*.js # specify the location of route definition modules
+  path: . # relative to the app-name, like http://server/app-name/route-name
+```
+
+By default, route URLs are configured to be:
+
+* \[**Instance URL**]:\[**HTTP Port**]/\[**Project Name**]/\[**Route URL**]
+
+However, you can specify the path to be `/` if you wish to have your routes handling the root path of incoming URLs.
+
+* The route below, using the default config, within the **dogs** project, with a route of **breeds**, would be available at **http://localhost:9926/dogs/breeds**.
+
+In effect, this route is just a pass-through to Harper. The same result could have been achieved by hitting the core Harper API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+  server.route({
+    url: '/',
+    method: 'POST',
+    preValidation: hdbCore.preValidation,
+    handler: hdbCore.request,
+  })
+}
+```
+
+## Custom Handlers
+
+For endpoints where you want to execute multiple operations against Harper, or perform additional processing (like an ML classification, or an aggregation, or a call to a 3rd party API), you can define your own logic in the handler. The function below will execute a query against the dogs table and filter the results to only return those dogs over 4 years of age.
+
+**IMPORTANT: This route has NO preValidation and uses hdbCore.requestWithoutAuthentication, which, as the name implies, bypasses all user authentication. See the security concerns and mitigations in the “helper methods” section, below.**
+
+```javascript
+export default async (server, { hdbCore, logger }) => {
+  server.route({
+    url: '/:id',
+    method: 'GET',
+    handler: async (request) => {
+      request.body = {
+        operation: 'sql',
+        sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`
+      };
+
+      const result = await hdbCore.requestWithoutAuthentication(request);
+      return result.filter((dog) => dog.age > 4);
+    }
+  });
+}
+```
+
+## Custom preValidation Hooks
+
+The simple example above was just a pass-through to Harper; the exact same result could have been achieved by hitting the core Harper API. But for many applications, you may want to authenticate the user using custom logic you write, or by conferring with a 3rd party service. Custom preValidation hooks let you do just that.
+
+Below is an example of a route that uses a custom validation hook:
+
+```javascript
+import customValidation from '../helpers/customValidation';
+
+export default async (server, { hdbCore, logger }) => {
+  server.route({
+    url: '/:id',
+    method: 'GET',
+    preValidation: (request) => customValidation(request, logger),
+    handler: (request) => {
+      request.body = {
+        operation: 'sql',
+        sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`
+      };
+
+      return hdbCore.requestWithoutAuthentication(request);
+    }
+  });
+}
+```
+
+Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](./define-routes#helper-methods).
+
+## Helper Methods
+
+When declaring routes, you are given access to two helper methods: hdbCore and logger.
+
+**hdbCore**
+
+hdbCore contains three functions that allow you to authenticate an inbound request and execute operations against Harper directly, bypassing the standard Operations API.
+
+* **preValidation**
+
+  This is an array of functions used for fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard Harper Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**.
+* **request**
+
+  This will execute a request with Harper using the operations API. The `request.body` should contain a standard Harper operation and must also include the `hdb_user` property that was in `request.body` provided in the callback.
+* **requestWithoutAuthentication**
+
+  Executes a request against Harper without any security checks around whether the inbound user is allowed to make this request. For security purposes, you should always take the following precautions when using this method:
+
+  * Properly handle user-submitted values, including URL params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.
+
+**logger**
+
+This helper allows you to write directly to the log file, hdb.log. It’s useful for debugging during development, although you may also use the console logger. There are five functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.
+
+* logger.trace('Starting the handler for /dogs')
+* logger.debug('This should only fire once')
+* logger.warn('This should never ever fire')
+* logger.error('This did not go well')
+* logger.fatal('This did not go very well at all')
diff --git a/site/versioned_docs/version-4.5/developers/applications/defining-roles.md b/site/versioned_docs/version-4.5/developers/applications/defining-roles.md
new file mode 100644
index 00000000..d6c766fc
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/defining-roles.md
@@ -0,0 +1,51 @@
+---
+title: Defining Roles
+---
+
+In addition to [defining a database schema](./defining-schemas), you can also define roles in your application. Roles are a way to group permissions together and assign them to users as part of Harper's [role based access control](../security/users-and-roles).
+An application component may declare roles that should exist for the application in a roles configuration file. To use this, first specify your roles config file in the `config.yaml` in your application directory:
+
+```yaml
+roles:
+  files: roles.yaml
+```
+
+Now you can create a roles.yaml in your application directory:
+
+```yaml
+declared-role:
+  super_user: false # This is a boolean value that indicates if the role is a super user or not
+  # Now we can grant the permissions to databases, here we grant permissions to the default data database
+  data: # This is the same structure as the role object that is used in the roles operations APIs
+    TableOne:
+      read: true
+      insert: true
+    TableTwo:
+      read: true
+      insert: false
+      update: true
+      delete: true
+      attributes:
+        name:
+          read: true
+          insert: false
+          update: true
+```
+
+With this in place, when Harper starts up, it will create the roles in the roles.yaml file if they do not already exist. If they do exist, it will update the roles with the new permissions. This allows you to manage your roles in your application code and have them automatically created or updated when the application starts.
+
+The structure of the roles.yaml file is (placeholders in angle brackets stand in for your own names and boolean values):
+
+```yaml
+<role-name>:
+  permission: # contains the permissions for the role; this structure is optional, and you can place flags like super_user here as a shortcut
+    super_user: <boolean>
+  <database-name>: # each database with permissions can be added as a named property on the role
+    tables: # this structure is optional, and table names can be placed directly under the database as a shortcut
+      <table-name>:
+        read: <boolean> # indicates if the role has read permission to this table
+        insert: <boolean> # indicates if the role has insert permission to this table
+        update: <boolean> # indicates if the role has update permission to this table
+        delete: <boolean> # indicates if the role has delete permission to this table
+        attributes:
+          <attribute-name>: # individual attributes can have permissions as well
+            read: <boolean>
+            insert: <boolean>
+            update: <boolean>
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/applications/defining-schemas.md b/site/versioned_docs/version-4.5/developers/applications/defining-schemas.md
new file mode 100644
index 00000000..d0aa42b8
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/defining-schemas.md
@@ -0,0 +1,222 @@
+---
+title: Defining Schemas
+---
+
+# Defining Schemas
+
+Schemas define tables and their attributes. Schemas can be declaratively defined in Harper using GraphQL schema definitions. Schema definitions can be used to ensure that tables required for applications exist and have the appropriate attributes. Schemas can define the primary key, data types for attributes, whether attributes are required, and which attributes should be indexed. The [introduction to applications](./) provides a helpful introduction to how to use schemas as part of database application development.
+
+Schemas can be used to define the expected structure of data, but are also highly flexible: they support heterogeneous data structures and by default allow data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).
+
+An example schema that defines a couple of tables might look like:
+
+```graphql
+# schema.graphql:
+type Dog @table {
+  id: ID @primaryKey
+  name: String
+  breed: String
+  age: Int
+}
+
+type Breed @table {
+  id: ID @primaryKey
+}
+```
+
+In this example, you can see that we specified the expected data structure for records in the Dog and Breed tables. For example, this will enforce that Dog records are required to have a `name` property with a string (or null, unless the type were specified to be non-nullable). This does not preclude records from having additional properties (see `@sealed` for preventing additional properties). For example, some Dog records could also optionally include a `favoriteTrick` property.
+
+On this page, we will describe the specific directives that Harper uses for defining tables and attributes in a schema.
+
+### Type Directives
+
+#### `@table`
+
+Schemas for tables are defined using GraphQL type definitions with a `@table` directive:
+
+```graphql
+type TableName @table
+```
+
+By default the table name is inherited from the type name (in this case the table name would be "TableName"). The `@table` directive supports several optional arguments (all of these are optional and can be freely combined):
+
+* `@table(table: "table_name")` - This allows you to explicitly specify the table name.
+* `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database.
+* `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds.
+* `@table(audit: true)` - This enables the audit log for the table so that a history of record changes is recorded. This defaults to the [configuration file's setting for `auditLog`](../../deployments/configuration#logging).
+
+Database naming: the default "data" database is generally a good choice for tables in applications that will not be reused in other applications (and don't need to worry about staying in a separate namespace). Applications with many tables may wish to organize the tables into separate databases (but remember that transactions do not preserve atomicity across different databases, only across tables in the same database). For components that are designed for re-use, it is recommended that you use a database name that is specific to the component (e.g. "my-component-data") to avoid name collisions with other components.
+
+#### `@export`
+
+This indicates that the specified table should be exported as a resource that is accessible as an externally available endpoint, through REST, MQTT, or any of the external resource APIs.
+
+This directive also accepts a `name` parameter to specify the name that should be used for the exported resource (how it will appear in the URL path). For example:
+
+```
+type MyTable @table @export(name: "my-table")
+```
+
+This table would be available at the URL path `/my-table/`. Without the `name` parameter, the exported name defaults to the name of the table type ("MyTable" in this example).
+
+### Relationships: `@relationship`
+
+Defining relationships is the foundation of using "join" queries in Harper. A relationship defines how one table relates to another table using a foreign key.
+Using the `@relationship` directive will define a property as a computed property, which resolves to a record/instance from a target type, based on the referenced attribute, which can be in this table or the target table. The `@relationship` directive must be used in combination with an attribute whose type references another table.
+
+#### `@relationship(from: attribute)`
+
+This defines a relationship where the foreign key is defined in this table, and relates to the primary key of the target table. If the foreign key is single-valued, this establishes a many-to-one relationship with the target table. The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship. For example, we can define a foreign key that references another table and then define the relationship. Here we create a `brandId` attribute that will be our foreign key (it will hold an id that references the primary key of the Brand table), and we define a relationship to the `Brand` table through the `brand` attribute:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  brandId: ID @indexed
+  brand: Brand @relationship(from: brandId)
+}
+type Brand @table @export {
+  id: ID @primaryKey
+}
+```
+
+Once this is defined we can use the `brand` attribute as a [property in our product instances](../../technical-details/reference/resource) and allow for querying by `brand` and selecting brand attributes as returned properties in [query results](../rest).
+
+Again, the foreign key may be a multi-valued array (an array of keys referencing the target table records). For example, if we had a list of features that references a Feature table:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  featureIds: [ID] @indexed # array of ids
+  features: [Feature] @relationship(from: featureIds) # array of referenced feature records
+}
+type Feature @table {
+  id: ID @primaryKey
+  ...
+}
+```
+
+#### `@relationship(to: attribute)`
+
+This defines a relationship where the foreign key is defined in the target table and relates to the primary key of this table. If the foreign key is single-valued, this establishes a one-to-many relationship with the target table. Note that the target table type must be an array element type (like `[Table]`). The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship. For example, we can define a reciprocal relationship from the example above, adding a relationship from brand back to product. Here we continue to use the `brandId` attribute from the `Product` schema, and we define a relationship to the `Product` table through the `products` attribute:
+
+```graphql
+type Brand @table @export {
+  id: ID @primaryKey
+  name: String
+  products: [Product] @relationship(to: brandId)
+}
+```
+
+Once this is defined we can use the `products` attribute as a property in our brand instances and allow for querying by `products` and selecting product attributes as returned properties in query results.
+
+Note that schemas can also reference themselves with relationships, allowing records to define relationships like parent-child relationships between records in the same table. Also note that, for a many-to-many relationship, you must not combine the `to` and `from` properties in the same relationship directive.
+
+### Computed Properties: `@computed`
+
+The `@computed` directive specifies that a field is computed based on other fields in the record.
+This is useful for creating derived fields that are not stored in the database, but are computed when specific record fields are queried/accessed. The `@computed` directive must be used in combination with an expression or function that computes the value of the field. For example:
+
+```graphql
+type Product @table {
+  id: ID @primaryKey
+  price: Float
+  taxRate: Float
+  totalPrice: Float @computed(from: "price + (price * taxRate)")
+}
+```
+
+The `from` argument specifies the expression that computes the value of the field. The expression can reference other fields in the record. The expression is evaluated when the record is queried or indexed.
+
+The computed function may also be defined in a JavaScript module, which is useful for more complex computations. You can specify a computed attribute, and then define the function with the `setComputedAttribute` method. For example:
+
+```graphql
+type Product @table {
+...
+  totalPrice: Float @computed
+}
+```
+
+```javascript
+tables.Product.setComputedAttribute('totalPrice', (record) => {
+  return record.price + (record.price * record.taxRate);
+});
+```
+
+Computed properties may also be indexed, which provides a powerful mechanism for creating indexes on derived fields with custom querying capabilities. This can provide a mechanism for composite indexes, custom full-text indexing, vector indexing, or other custom indexing strategies. A computed property can be indexed by adding the `@indexed` directive to the computed property. When using a JavaScript module for a computed property that is indexed, it is highly recommended that you specify a `version` argument to ensure that the computed attribute is re-evaluated when the function is updated. For example:
+
+```graphql
+type Product @table {
+...
+  totalPrice: Float @computed(version: 1) @indexed
+}
+```
+
+If you were to update the `setComputedAttribute` function for the `totalPrice` attribute to use a new formula, you must increment the `version` argument to ensure that the computed attribute is re-indexed (note that on a large database, re-indexing may be a lengthy operation). Failing to increment the `version` argument with a modified function can result in an inconsistent index. The computed function must be deterministic, and should not have side effects, as it may be re-evaluated multiple times during indexing.
+
+Note that computed properties will not be included by default in a query result; you must explicitly include them in query results using the `select` query function.
+
+Another example of using a computed custom index is that we could index all the comma-separated words in a `tags` property by doing the following (similar techniques are used for full-text indexing):
+
+```graphql
+type Product @table {
+  id: ID @primaryKey
+  tags: String # comma delimited set of tags
+  tagsSeparated: [String] @computed(from: "tags.split(/\\s*,\\s*/)") @indexed # split and index the tags
+}
+```
+
+For more in-depth information on computed properties, visit our blog [here](https://www.harpersystems.dev/development/tutorials/how-to-create-custom-indexes-with-computed-properties).
+
+### Field Directives
+
+Field directives can be used to provide information about each attribute in a table type definition.
+
+#### `@primaryKey`
+
+The `@primaryKey` directive specifies that an attribute is the primary key for a table. Primary keys must be unique, and when records are created, a primary key will be auto-generated if none is provided.
+When a primary key is auto-generated, it will be a UUID (as a string) if the primary key type is `String` or `ID`. If the primary key type is `Int`, `Long`, or `Any`, then the primary key will be an auto-incremented number. Using numeric primary keys is more efficient than using UUIDs. Note that if the type is `Int`, the primary key will be limited to 32 bits, which can be limiting and problematic for large tables. It is recommended that if you will be relying on auto-generated keys, you use a primary key type of `Long` or `Any` (the latter will allow you to also use strings as primary keys).
+
+#### `@indexed`
+
+The `@indexed` directive specifies that an attribute should be indexed. This is necessary if you want to execute queries using this attribute (whether that is through RESTful query parameters, SQL, or NoSQL operations).
+
+#### `@createdTime`
+
+The `@createdTime` directive indicates that this property should be assigned a timestamp of the creation time of the record (in epoch milliseconds).
+
+#### `@updatedTime`
+
+The `@updatedTime` directive indicates that this property should be assigned a timestamp of each updated time of the record (in epoch milliseconds).
+
+#### `@sealed`
+
+The `@sealed` directive specifies that no additional properties should be allowed on records besides those specified in the type itself.
+
+### Defined vs Dynamic Schemas
+
+If you do not define a schema for a table and create a table through the operations API (without specifying attributes) or studio, such a table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema). It is generally best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.
+
+### Field Types
+
+Harper supports the following field types in addition to user-defined (object) types:
+
+* `String`: String/text
+* `Int`: A 32-bit signed integer (from -2147483648 to 2147483647)
+* `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992)
+* `Float`: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). Note that all numbers are stored in the most compact representation available)
+* `BigInt`: Any integer (negative or positive) with less than 300 digits (Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately)
+* `Boolean`: true or false
+* `ID`: A string (but indicates it is not intended to be human readable)
+* `Any`: Any primitive, object, or array is allowed
+* `Date`: A Date object
+* `Bytes`: Binary data as a Buffer or Uint8Array
+* `Blob`: Binary data as a [Blob](../../technical-details/reference/blob), designed for large blocks of data that can be streamed. It is recommended that you use this for binary data that will typically be larger than 20KB.
+
+#### Renaming Tables
+
+It is important to note that Harper does not currently support renaming tables. If you change the name of a table in your schema definition, this will result in the creation of a new, empty table.
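+
+If the goal is only to rename the GraphQL type (not the stored table), one possible workaround is to pin the underlying table name with the `table` argument described above, so the type name can change while the data stays put. This is a sketch based on the documented `@table(table: ...)` behavior; the `Canine`/`Dog` names are hypothetical:
+
+```graphql
+# hypothetical: the type was originally `Dog`, created with the default table name
+type Canine @table(table: "Dog") @export {
+  id: ID @primaryKey
+  name: String
+}
+```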
+
+### OpenAPI Specification
+
+_The_ [_OpenAPI Specification_](https://spec.openapis.org/oas/v3.1.0) _defines a standard, programming language-agnostic interface description for HTTP APIs, which allows both humans and computers to discover and understand the capabilities of a service without requiring access to source code, additional documentation, or inspection of network traffic._
+
+If a set of endpoints is configured through a Harper GraphQL schema, those endpoints can be described using a default REST endpoint, `GET /openapi`.
+
+_Note: The `/openapi` endpoint should only be used as a starting guide; it may not cover all the elements of an endpoint._
diff --git a/site/versioned_docs/version-4.5/developers/applications/example-projects.md b/site/versioned_docs/version-4.5/developers/applications/example-projects.md
new file mode 100644
index 00000000..466ce267
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/example-projects.md
@@ -0,0 +1,37 @@
+---
+title: Example Projects
+---
+
+# Example Projects
+
+**Library of example Harper applications and components:**
+
+* [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang
+
+* [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams
+
+* [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos
+
+* [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock
+
+* [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush
+
+* [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden
+
+* [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft
+
+* [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber
+
+* [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty
+
+* [How I used Harper Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj
+
+* [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary
+
+* [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi
+
+* [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp
+
+* [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David
+
+* [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select* Podcast
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/applications/index.md b/site/versioned_docs/version-4.5/developers/applications/index.md
new file mode 100644
index 00000000..e6cb2a68
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/index.md
@@ -0,0 +1,168 @@
+---
+title: Applications
+---
+
+# Applications
+
+## Overview of Harper Applications
+
+Harper is more than a database; it's a distributed clustering platform that lets you package your schema, endpoints, and application logic and deploy them to an entire fleet of Harper instances optimized for scalable, on-the-edge data delivery.
+
+In this guide, we are going to explore the extensible architecture that Harper provides by building a Harper component, a fundamental building block of the Harper ecosystem.
+
+When working through this guide, we recommend you use the [Harper Application Template](https://github.com/HarperDB/application-template) repo as a reference.
+
+## Understanding the Component Application Architecture
+
+Harper provides several types of components. Any package that is added to Harper is called a "component", and components are generally categorized as either "applications", which deliver a set of endpoints for users, or "extensions", which are building blocks for features like authentication, additional protocols, and connectors that can be used by other components. Components can be added to the `hdb/components` directory and will be loaded by Harper when it starts. Components that are remotely deployed to Harper (through the studio or the operations API) are installed into the `hdb/node_modules` directory. Using `harperdb run .` or `harperdb dev .` allows us to specifically load a certain application in addition to any that have been manually added to `hdb/components` or installed (in `hdb/node_modules`).
+
+```mermaid
+flowchart LR
+  Client(Client)-->Endpoints
+  Client(Client)-->HTTP
+  Client(Client)-->Extensions
+  subgraph Harper
+    direction TB
+    Applications(Applications)-- "Schemas" --> Tables[(Tables)]
+    Applications-->Endpoints[/Custom Endpoints/]
+    Applications-->Extensions
+    Endpoints-->Tables
+    HTTP[/REST/HTTP/]-->Tables
+    Extensions[/Extensions/]-->Tables
+  end
+```
+
+## Custom Functionality with JavaScript
+
+[The getting started guide](../../getting-started/first-harper-app) covers how to build an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. This gives you tremendous flexibility and control over how data is accessed and modified in Harper. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality, by adding a property to dog records, when they are returned, that includes their age in human years. In Harper, data is accessed through our [Resource API](../../technical-details/reference/resource), a standard interface for accessing data sources and tables and making them available to endpoints. Database tables are `Resource` classes, so extending the functionality of a table is as simple as extending its class.
+
+To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder). Endpoints can then be defined with Resource classes that are `export`ed.
This can be done in addition to, or in lieu of, the `@export`ed types in `schema.graphql`. If you are exporting and extending a table you defined in the schema, make sure you remove the `@export` directive from the schema so that you don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put`, to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it:
+
+```javascript
+// resources.js:
+const { Dog } = tables; // get the Dog table from the Harper-provided set of tables (in the default database)
+
+export class DogWithHumanAge extends Dog {
+  get(query) {
+    this.humanAge = 15 + this.age * 5; // silly calculation of the human-age equivalent
+    return super.get(query);
+  }
+}
+```
+
+Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, we now have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute, so accessing instance properties like `age` will get the value from the underlying record. The instance holds information about the primary key of the record so updates and actions can be applied to the correct record. Changed or newly assigned properties can be saved or included in the resource as it is returned and serialized. The `return super.get(query)` call at the end allows any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest#select-properties)).
+
+Often we may want to incorporate data from other tables or data sources into our data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql):
+
+```graphql
+type Breed @table {
+  name: String @primaryKey
+  description: String @indexed
+  lifespan: Int
+  averageWeight: Float
+}
+```
+
+Next we will use this table in our `get()` method, calling the new table's (static) `get()` method to retrieve a breed by id. To do this correctly, we access the table using our current context by passing in `this` as the second argument. This is important because it ensures that we are accessing the data atomically, in a consistent snapshot across tables. It provides automatic tracking of the most-recently-updated timestamps across resources for caching purposes, allows contextual metadata (like the user who requested the data) to be shared, and ensures transactional atomicity for any writes (not needed in this get operation, but important for other operations). The resource methods are automatically wrapped in a transaction (which commits/finishes when the method completes), and this allows us to fully utilize multiple resources in our current transaction.
With our own snapshot of the database for the Dog and Breed tables, we can then access data like this:
+
+```javascript
+// resources.js:
+const { Dog, Breed } = tables; // get the Breed table too
+export class DogWithBreed extends Dog {
+  async get(query) {
+    let breedDescription = await Breed.get(this.breed, this);
+    this.breedDescription = breedDescription;
+    return super.get(query);
+  }
+}
+```
+
+The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified by the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance.
+
+Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While the HTTP PUT method has a specific semantic definition (replace the current record), the HTTP POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's `post()` method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array on a specific instance. We might do it like this, specifying an action property so that different actions can be differentiated:
+
+```javascript
+export class CustomDog extends Dog {
+  async post(data) {
+    if (data.action === 'add-trick')
+      this.tricks.push(data.trick);
+  }
+}
+```
+
+A POST request to `/CustomDog/` would call this `post` method. The Resource class automatically tracks changes you make to your resource instances and saves those changes when the transaction is committed (again, these methods are automatically wrapped in a transaction and committed once the request handler is finished). So when you push data onto the `tricks` array, this will be recorded and persisted when this method finishes, before a response is sent to the client.
+
+The `post` method automatically marks the current instance as being updated. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like the `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction).
+
+We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post` or `put` method to do this, but we may want to separate the logic so these methods can be called separately without authorization checks. The [Resource API](../../technical-details/reference/resource) defines `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods to easily configure individual capabilities. For example, we might do this:
+
+```javascript
+export class CustomDog extends Dog {
+  allowUpdate(user) {
+    return this.owner === user.username;
+  }
+}
+```
+
+Any methods that are not defined will fall back to Harper's default authorization procedure based on users' roles. If you are using/extending a table, this is based on Harper's [role-based access](../security/users-and-roles). If you are extending the base `Resource` class, the default access requires super-user permission.
+
+You can also use the `default` export to define the root path resource handler. For example:
+
+```javascript
+// resources.js
+export default class CustomDog extends Dog {
+  ...
+}
+```
+
+This will allow requests to a URL like `/` to be directly resolved to this resource.
+
+## Define Custom Data Sources
+
+We can also directly implement the Resource class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like:
+
+```javascript
+const { Breed } = tables; // our Breed table
+class BreedSource extends Resource { // define a data source
+  async get() {
+    return (await fetch(`http://best-dog-site.com/${this.getId()}`)).json();
+  }
+}
+// define that our Breed table is a cache of data from the data source above, with a specified expiration
+Breed.sourcedFrom(BreedSource, { expiration: 3600 });
+```
+
+The [caching documentation](./caching) provides much more information on how to use Harper's powerful caching capabilities and set up data sources.
+
+Harper provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals) and the [Resource interface](../../technical-details/reference/resource).
+
+## Configuring Applications/Components
+
+Every application or component can define its own configuration in a `config.yaml`. If you are using the application template, you will have a [default configuration in this config file](https://github.com/HarperDB/application-template/blob/main/config.yaml) (this is also the default configuration used if no config file is provided). Within the config file, you can configure how different files and resources are loaded and handled. The default configuration file itself is documented with directions. Each entry can specify any `files` that the loader will handle, and can also optionally specify what, if any, URL `path`s it will handle. A path of `/` means that the root URLs are handled by the loader, and a path of `.` indicates that URLs that start with this application's name are handled.
+
+This config file also allows you to define a location for static files (which are delivered as-is for incoming HTTP requests).
+
+Each configuration entry can have the following properties, in addition to properties that may be specific to the individual component:
+
+* `files`: This specifies the set of files that should be handled by the component. This is a glob pattern, so a set of files can be specified like "directory/**".
+* `path`: This is the URL path that is handled by this component.
+* `root`: This specifies the root directory for mapping file paths to URLs. For example, if you want all the files in `web/**` to be available in the root URL path via the static handler, you could specify a root of `web` to indicate that the web directory maps to the root URL path.
+* `package`: This is used to specify that this component is a third-party package, and can be loaded from the specified package reference (which can be an NPM package, GitHub reference, URL, etc.).
+
+## Define Fastify Routes
+
+Exporting resources will generate full RESTful endpoints, but you may prefer to define endpoints through a framework. Harper includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins that provides sophisticated route definition capabilities.
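+
+For example, a minimal route module (placed in the `routes` directory described below) might look like this sketch; the file name, route path, and use of the `Dog` table are illustrative assumptions:
+
+```javascript
+// routes/dogs.js - a minimal sketch of a Fastify route module
+export default async function (fastify, opts) {
+  // GET /dogs/:id looks a record up in the Dog table
+  fastify.get('/dogs/:id', async (request, reply) => {
+    return tables.Dog.get(request.params.id);
+  });
+}
+```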
+
+By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which allows these modules to export a function that defines Fastify routes. See the [defining routes documentation](./define-routes) for more information on how to create Fastify routes.
+
+However, Fastify is not as fast as Harper's RESTful endpoints (about 10%-20% slower/more overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so Harper's REST interface is generally recommended for optimum performance and ease of use.
+
+## Restarting Your Instance
+
+Generally, Harper will auto-detect when files change and auto-restart the appropriate threads. However, if there are changes that aren't detected, you may manually restart with the `restart_service` operation:
+
+```json
+{
+  "operation": "restart_service",
+  "service": "http_workers"
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/applications/web-applications.md b/site/versioned_docs/version-4.5/developers/applications/web-applications.md
new file mode 100644
index 00000000..d9892b9a
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/applications/web-applications.md
@@ -0,0 +1,63 @@
+---
+title: Web Applications on Harper
+---
+
+# Web Applications on Harper
+
+Harper is an efficient, capable, and robust platform for developing web applications, with numerous capabilities designed
+specifically for optimized web application delivery. In addition, there are a number of tools and frameworks that can be used
+with Harper to create web applications with standard best-practice design and development patterns. Running these frameworks
+on Harper can unlock tremendous scalability and performance benefits by leveraging Harper's built-in multi-threading,
+caching, and distributed design.
+
+Harper's unique ability to run JavaScript code directly on the server side, combined with its built-in database for data storage, querying, and caching,
+allows you to create full-featured web applications with a single platform. This eliminates the overhead of legacy solutions that
+require separate application servers, databases, and caching layers, and their requisite communication overhead and latency, while
+allowing the full stack to be deployed to distributed locations with full local response handling, providing an incredibly low-latency web experience.
+
+## Web Application Frameworks
+
+With built-in caching mechanisms and an easy-to-use JavaScript API for interacting with data, creating full-featured applications
+using popular frameworks is a simple and straightforward process.
+
+Get started today with one of our examples:
+
+- [Next.js](https://github.com/HarperDB/nextjs-example)
+- [React SSR](https://github.com/HarperDB/react-ssr-example)
+- [Vue SSR](https://github.com/HarperDB/vue-ssr-example)
+- [Svelte SSR](https://github.com/HarperDB/svelte-ssr-example)
+- [Solid SSR](https://github.com/HarperDB/solid-ssr-example)
+
+## Cookie Support
+
+Harper includes support for authenticated sessions using cookies. This allows you to create secure, authenticated web applications
+using best-practice security patterns, allowing users to log in and maintain a session without any credential storage on the client side
+that could be compromised. A login endpoint can be defined by exporting a resource and calling the `login` method on the request object.
For example, this could be a login endpoint in your resources.js file:
+
+```javascript
+export class Login extends Resource {
+  async post(data) {
+    const { username, password } = data;
+    // the resource context is the request object, which provides the login method
+    await this.getContext().login(username, password);
+    return { message: 'Logged in!' };
+  }
+}
+```
+
+This endpoint can be called from the client side using a standard fetch request; a cookie will be returned, and the session will be maintained by Harper.
+This allows web applications to directly interact with Harper and database resources, without needing to go through extra layers of authentication handling.
+
+## Browser Caching Negotiation
+
+Browsers support caching negotiation with revalidation, which allows requests for locally cached data to be sent to servers with a tag or timestamp. Harper's REST functionality can fully interact with these headers and return a `304 Not Modified` response based on a prior `ETag` sent in request headers. It is highly recommended that you utilize the [REST interface](../rest) for accessing tables, as it facilitates this downstream browser caching. Timestamps are recorded with all records and are then returned [as the `ETag` in the response](../rest#cachingconditional-requests). Utilizing this browser caching can greatly reduce the load on your server and improve the performance of your web application by being able to instantly use locally cached data after revalidation from the server.
+
+## Built-in Cross-Origin Resource Sharing (CORS)
+
+Harper includes built-in support for Cross-Origin Resource Sharing (CORS), which allows you to define which domains are allowed to access your Harper instance. This is a critical security feature for web applications, as it prevents unauthorized access to your data from other domains, while allowing cross-domain access from known hosts. You can define the allowed domains in your [Harper configuration file](../../deployments/configuration#http), and Harper will automatically handle the CORS headers for you.
+
+## More Resources
+
+Make sure to check out our developer videos too:
+
+- [Next.js on Harper | Step-by-Step Guide for Next Level Next.js Performance](https://youtu.be/GqLEwteFJYY)
+- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc)
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/clustering/certificate-management.md b/site/versioned_docs/version-4.5/developers/clustering/certificate-management.md
new file mode 100644
index 00000000..11ff0a6c
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/certificate-management.md
@@ -0,0 +1,70 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+## Development
+
+Out of the box, Harper generates certificates that are used when Harper nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the Harper node, the following settings (see the full [configuration file](../../deployments/configuration) docs for more details) are defaulted & recommended for ease of development:
+
+```
+clustering:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+    insecure: true
+    verify: true
+```
+
+The certificates that Harper generates are stored in the `keys` directory under your Harper root path (e.g. `~/hdb/keys/`).
+
+`insecure` is set to `true` to accept the certificate CN mismatch caused by the development certificates.
+
+`verify` is set to `true` to enable mutual TLS between the nodes.
+
+## Production
+
+In a production environment, we recommend using your own certificate authority (CA), or a public CA such as LetsEncrypt, to generate certificates for your Harper cluster. This will let you generate certificates with CNs that match the FQDNs of your nodes.
+
+Once you generate new certificates, you can make Harper start using them by either replacing the generated files with your own or updating the configuration to point to your new certificates, and then restarting Harper.
+
+Since these new certificates can be issued with correct CNs, you should set `insecure` to `false` so that nodes will do full validation of the certificates of the other nodes.
+
+### Certificate Requirements
+
+* Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication`, as these certificates will be used both to accept connections from other Harper nodes and to make requests to other Harper nodes. Example:
+
+```
+X509v3 Key Usage: critical
+    Digital Signature, Key Encipherment
+X509v3 Extended Key Usage:
+    TLS Web Server Authentication, TLS Web Client Authentication
+```
+
+* If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file.
+* If your certificates expire, you will need a way to issue new certificates to the nodes and then restart Harper. If you are using a public CA such as LetsEncrypt, a tool like `certbot` can be used to renew certificates.
+
+### Certificate Troubleshooting
+
+If you are having TLS issues with clustering, use the following steps to verify that your certificates are valid (the file names below are the defaults from the development configuration above):
+
+1. Make sure certificates can be parsed and that you can view the contents:
+
+```
+openssl x509 -in certificate.pem -noout -text
+```
+
+1. Make sure the certificate validates with the CA:
+
+```
+openssl verify -CAfile ca.pem certificate.pem
+```
+
+1. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match:
+
+```
+openssl rsa -modulus -noout -in privateKey.pem | openssl md5
+openssl x509 -modulus -noout -in certificate.pem | openssl md5
+```
diff --git a/site/versioned_docs/version-4.5/developers/clustering/creating-a-cluster-user.md b/site/versioned_docs/version-4.5/developers/clustering/creating-a-cluster-user.md
new file mode 100644
index 00000000..5569ff04
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/creating-a-cluster-user.md
@@ -0,0 +1,59 @@
+---
+title: Creating a Cluster User
+---
+
+# Creating a Cluster User
+
+Inter-node authentication takes place via Harper users. There is a special role type called `cluster_user` that exists by default and limits the user to clustering functionality only.
+
+A `cluster_user` must be created and added to the `harperdb-config.yaml` file for clustering to be enabled.
+
+All nodes that are intended to be clustered together need to share the same `cluster_user` credentials (i.e. username and password).
+
+There are multiple ways a `cluster_user` can be created:
+
+1. Through the operations API by calling `add_user`:
+
+```json
+{
+  "operation": "add_user",
+  "role": "cluster_user",
+  "username": "cluster_account",
+  "password": "letsCluster123!",
+  "active": true
+}
+```
+
+When using the API to create a cluster user, the `harperdb-config.yaml` file must be updated with the username of the new cluster user.
+
+This can be done through the API by calling `set_configuration` or by editing the `harperdb-config.yaml` file.
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_user": "cluster_account"
+}
+```
+
+In the `harperdb-config.yaml` file, under the top-level `clustering` element, there will be a `user` element. Set this to the name of the cluster user.
+
+```yaml
+clustering:
+  user: cluster_account
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Upon installation, using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+_Note: Using command line or environment variables for setting the cluster user only works on install._
+
+```
+harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
+
+1. Upon installation, using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+```
+CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123! harperdb install
+```
diff --git a/site/versioned_docs/version-4.5/developers/clustering/enabling-clustering.md b/site/versioned_docs/version-4.5/developers/clustering/enabling-clustering.md
new file mode 100644
index 00000000..2b80d4e7
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/enabling-clustering.md
@@ -0,0 +1,49 @@
+---
+title: Enabling Clustering
+---
+
+# Enabling Clustering
+
+Clustering does not run by default; it needs to be enabled.
+
+To enable clustering, the `clustering.enabled` configuration element in the `harperdb-config.yaml` file must be set to `true`.
+
+There are multiple ways to update this element:
+
+1. Directly editing the `harperdb-config.yaml` file and setting `enabled` to `true`:
+
+```yaml
+clustering:
+  enabled: true
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Calling `set_configuration` through the operations API:
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_enabled": true
+}
+```
+
+_Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._
+
+1. Using **command line variables**:
+
+```
+harperdb --CLUSTERING_ENABLED true
+```
+
+1. Using **environment variables**:
+
+```
+CLUSTERING_ENABLED=true
+```
+
+An efficient way to **install Harper**, **create the cluster user**, **set the node name**, and **enable clustering** in one operation is to combine the steps using command line and/or environment variables. Here is an example using command line variables:
+
+```
+harperdb install --CLUSTERING_ENABLED true --CLUSTERING_NODENAME Node1 --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.5/developers/clustering/establishing-routes.md b/site/versioned_docs/version-4.5/developers/clustering/establishing-routes.md
new file mode 100644
index 00000000..26ebf659
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/establishing-routes.md
@@ -0,0 +1,73 @@
+---
+title: Establishing Routes
+---
+
+# Establishing Routes
+
+A route is a connection between two nodes. It is how the clustering network is established.
+
+Routes do not need to cross-connect all nodes in the cluster. You can select one leader node (or a few) that all other nodes connect to, you can chain nodes together, and so on. As long as there is one route connecting a node to the cluster, all other nodes should be able to reach that node.
+
+Using routes, the clustering servers will create a mesh network between nodes. This mesh network ensures that if a node drops out, all other nodes can still communicate with each other. That being said, we recommend designing your routing with failover in mind; this means not storing all your routes on one node but dispersing them throughout the network.
+
+A simple route example is a two-node topology: if Node1 adds a route to connect it to Node2, Node2 does not need to add a route to Node1. That one route configuration is all that’s needed to establish a bidirectional connection between the nodes.
+
+A route consists of a `port` and a `host`.
+
+`port` - the clustering port of the remote instance you are creating the connection with. This is the `clustering.hubServer.cluster.network.port` in the Harper configuration on the node you are connecting with.
+
+`host` - the host of the remote instance you are creating the connection with. This can be an IP address or a URL.
+
+Routes are set in the `harperdb-config.yaml` file using the `clustering.hubServer.cluster.network.routes` element, which expects an object array, where each object has two properties, `port` and `host`.
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      network:
+        routes:
+          - host: 3.62.184.22
+            port: 9932
+          - host: 3.735.184.8
+            port: 9932
+```
+
+![figure 1](/img/v4.5/clustering/figure1.png)
+
+This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes.
+
+There are multiple ways to set routes:
+
+1. Directly editing the `harperdb-config.yaml` file (refer to the code snippet above).
+1. Calling `cluster_set_routes` through the API:
+
+```json
+{
+  "operation": "cluster_set_routes",
+  "server": "hub",
+  "routes": [ { "host": "3.735.184.8", "port": 9932 } ]
+}
+```
+
+_Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._
+
+1. From the command line:
+
+```bash
+--CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]"
+```
+
+1. Using environment variables:
+
+```bash
+CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}]
+```
+
+The API also has `cluster_get_routes` for getting all routes in the config and `cluster_delete_routes` for deleting routes.
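+
+For example, the configured routes can be fetched with a minimal call (shown here without any optional parameters):
+
+```json
+{
+  "operation": "cluster_get_routes"
+}
+```
+
+And routes can be deleted: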
+
+```json
+{
+  "operation": "cluster_delete_routes",
+  "routes": [ { "host": "3.735.184.8", "port": 9932 } ]
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/clustering/index.md b/site/versioned_docs/version-4.5/developers/clustering/index.md
new file mode 100644
index 00000000..14556f3c
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/index.md
@@ -0,0 +1,31 @@
+---
+title: NATS Clustering
+---
+
+# NATS Clustering
+
+Harper 4.0 - 4.3 used a clustering system based on NATS for replication. In 4.4+, Harper has moved to a new native replication system that has better performance, reliability, and data consistency. This document describes the legacy NATS clustering system. Harper clustering is the process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns.
+
+Harper’s clustering engine replicates data between instances of Harper using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster, following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model, comparing the recorded transaction time against the timestamp on the destination node’s copy of the record.
+
+***
+
+### Common Use Case
+
+A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold:
+
+* The edge application should not be making outbound HTTP requests for security purposes.
+* There may not be a reliable network connection.
+* Not all sensor data will be sent to the cloud--either because of the unreliable network connection, or maybe it’s just a pain to store it.
+* The edge node should be inaccessible from outside the firewall.
+* The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings.
+
+Harper simplifies the architecture of such an application with its bi-directional, table-level replication:
+
+* The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds.
+* The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so.
+* When a threshold violation occurs, the application adds a record to the “alerts” table.
+* The application appends to that record’s array the “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation.
+* The edge instance publishes the “alerts” table up to the cloud instance.
+
+By letting Harper focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to Harper, you reduce the overall exposure of your application to outside forces.
diff --git a/site/versioned_docs/version-4.5/developers/clustering/managing-subscriptions.md b/site/versioned_docs/version-4.5/developers/clustering/managing-subscriptions.md
new file mode 100644
index 00000000..8d2cafef
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/managing-subscriptions.md
@@ -0,0 +1,199 @@
+---
+title: Managing subscriptions
+---
+
+# Managing subscriptions
+
+Tables are replicated when the table is designated as replicating and there is a subscription between the nodes. The subscription can be set to publish, subscribe, or both.
+
+Tables are designated as replicating by default, but this can be changed by setting `replicate` to `false` in the table definition:
+
+```graphql
+type Product @table(replicate: false) {
+  id: ID!
+  name: String!
+}
+```
+
+Alternatively, in your harperdb-config.yaml, you can set the default replication behavior for databases and indicate which databases should be replicated by default:
+
+```yaml
+replication:
+  databases: data
+```
+
+If a table is not in the list of databases to be replicated, it will not be replicated unless the table is specifically set to replicate:
+
+```graphql
+type Product @table(replicate: true) {
+  id: ID!
+  name: String!
+}
+```
+
+Subscriptions can be added, updated, or removed through the API.
+
+_Note: The databases and tables in the subscription must exist on either the local or the remote node. Any databases or tables that do not exist on one particular node, for example, the local node, will be automatically created on the local node._
+
+To add a single node and create one or more subscriptions, use `set_node_replication`:
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "database": "data",
+      "table": "dog",
+      "publish": false,
+      "subscribe": true
+    },
+    {
+      "database": "data",
+      "table": "chicken",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+This is an example of adding Node2 to your local node. Subscriptions are created for two tables, dog and chicken.
+
+To update one or more subscriptions with a single node, you can also use `set_node_replication`; however, this behaves as a PATCH/upsert, where only the subscription(s) changing will be inserted/updated while the others will be left untouched.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "database": "dev",
+      "table": "dog",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+This call will update the subscription for the dog table. Any other subscriptions with Node2 will not change.
+
+To add or update subscriptions with one or more nodes in one API call, use `configure_cluster`:
+
+```json
+{
+  "operation": "configure_cluster",
+  "connections": [
+    {
+      "node_name": "Node2",
+      "subscriptions": [
+        {
+          "database": "dev",
+          "table": "chicken",
+          "publish": false,
+          "subscribe": true
+        },
+        {
+          "database": "prod",
+          "table": "dog",
+          "publish": true,
+          "subscribe": true
+        }
+      ]
+    },
+    {
+      "node_name": "Node3",
+      "subscriptions": [
+        {
+          "database": "dev",
+          "table": "chicken",
+          "publish": true,
+          "subscribe": false
+        }
+      ]
+    }
+  ]
+}
+```
+
+_Note: `configure_cluster` will override **any and all** existing subscriptions defined on the local node. This means that before going through the connections in the request and adding the subscriptions, it will first go through **all existing subscriptions the local node has** and remove them.
To get all existing subscriptions, use `cluster_status`._
+
+#### Start time
+
+There is an optional property called `start_time` that can be passed in the subscription. This property accepts an ISO-formatted UTC date.
+
+`start_time` can be used to set the time from which you would like to source transactions from a table when creating or updating a subscription.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "database": "dev",
+      "table": "dog",
+      "publish": false,
+      "subscribe": true,
+      "start_time": "2022-09-02T20:06:35.993Z"
+    }
+  ]
+}
+```
+
+This example will get all transactions on Node2’s dog table starting from `2022-09-02T20:06:35.993Z` and replicate them locally on the dog table.
+
+If no start time is passed, it defaults to the current time.
+
+_Note: start time utilizes clustering to source past transactions. For this reason, it can only source transactions that occurred while clustering was enabled._
+
+#### Remove node
+
+To remove a node and all its subscriptions, use `remove_node`:
+
+```json
+{
+  "operation": "remove_node",
+  "node_name": "Node2"
+}
+```
+
+#### Cluster status
+
+To get the status of all connected nodes and see their subscriptions, use `cluster_status`:
+
+```json
+{
+  "node_name": "Node1",
+  "is_enabled": true,
+  "connections": [
+    {
+      "node_name": "Node2",
+      "status": "open",
+      "ports": {
+        "clustering": 9932,
+        "operations_api": 9925
+      },
+      "latency_ms": 65,
+      "uptime": "11m 19s",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "publish": true,
+          "subscribe": true
+        }
+      ],
+      "system_info": {
+        "hdb_version": "4.0.0",
+        "node_version": "16.17.1",
+        "platform": "linux"
+      }
+    }
+  ]
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/clustering/naming-a-node.md b/site/versioned_docs/version-4.5/developers/clustering/naming-a-node.md
new file mode 100644
index 00000000..67ac2c49
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/naming-a-node.md
@@ -0,0 +1,45 @@
+---
+title: Naming a Node
+---
+
+# Naming a Node
+
+The node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster.
+
+The name cannot contain any of the following: dot (`.`), comma (`,`), asterisk (`*`), greater-than (`>`), or whitespace.
+
+The name is set in the `harperdb-config.yaml` file using the `clustering.nodeName` configuration element.
+
+_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._
+
+There are multiple ways to update this element:
+
+1. Directly editing the `harperdb-config.yaml` file:
+
+```yaml
+clustering:
+  nodeName: Node1
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Calling `set_configuration` through the operations API:
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_nodeName": "Node1"
+}
+```
+
+1. Using command line variables:
+
+```
+harperdb --CLUSTERING_NODENAME Node1
+```
+
+1. Using environment variables:
+
+```
+CLUSTERING_NODENAME=Node1
+```
diff --git a/site/versioned_docs/version-4.5/developers/clustering/requirements-and-definitions.md b/site/versioned_docs/version-4.5/developers/clustering/requirements-and-definitions.md
new file mode 100644
index 00000000..22bc3977
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/requirements-and-definitions.md
@@ -0,0 +1,11 @@
+---
+title: Requirements and Definitions
+---
+
+# Requirements and Definitions
+
+To create a cluster you must have two or more nodes\* (aka instances) of Harper running.
+
+\*_A node is a single instance/installation of Harper. A node of Harper can operate independently with clustering on or off._
+
+On the following pages we'll walk you through the steps required, in order, to set up a Harper cluster.
diff --git a/site/versioned_docs/version-4.5/developers/clustering/subscription-overview.md b/site/versioned_docs/version-4.5/developers/clustering/subscription-overview.md
new file mode 100644
index 00000000..2c135a86
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/subscription-overview.md
@@ -0,0 +1,45 @@
+---
+title: Subscription Overview
+---
+
+# Subscription Overview
+
+A subscription defines how data should move between two nodes. Subscriptions are exclusively table-level and operate independently. They connect a table on one node to a table on another node; the subscription applies to the matching database name and table name on both nodes.
+
+_Note: ‘local’ and ‘remote’ nodes are often referred to. In the context of these docs, ‘local’ is the node that receives the API request to create/update a subscription, and ‘remote’ is the other node referenced in the request, the node on the other end of the subscription._
+
+A subscription consists of:
+
+`database` - the name of the database that the table you are creating the subscription for belongs to. *Note: this was previously referred to as schema and may occasionally still be referenced that way.*
+
+`table` - the name of the table the subscription will apply to.
+
+`publish` - a boolean which determines whether transactions on the local table should be replicated on the remote table.
+
+`subscribe` - a boolean which determines whether transactions on the remote table should be replicated on the local table.
+
+#### Publish subscription
+
+![figure 2](/img/v4.5/clustering/figure2.png)
+
+This diagram is an example of a `publish` subscription from the perspective of Node1.
+
+The record with id 2 has been inserted in the dog table on Node1; after that insert completes, it is sent to Node2 and inserted in the dog table there.
+
+#### Subscribe subscription
+
+![figure 3](/img/v4.5/clustering/figure3.png)
+
+This diagram is an example of a `subscribe` subscription from the perspective of Node1.
+
+The record with id 3 has been inserted in the dog table on Node2; after that insert completes, it is sent to Node1 and inserted there.
+
+#### Subscribe and Publish
+
+![figure 4](/img/v4.5/clustering/figure4.png)
+
+This diagram shows both subscribe and publish, but with publish set to false. You can see that because subscribe is true, the insert on Node2 is being replicated on Node1, but because publish is set to false, the insert on Node1 is _**not**_ being replicated on Node2.
+
+![figure 5](/img/v4.5/clustering/figure5.png)
+
+This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1.
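+
+Expressed as a subscription object (as used with subscription management operations like `set_node_replication`), the figure 5 behavior corresponds to:
+
+```json
+{
+  "database": "dev",
+  "table": "dog",
+  "publish": true,
+  "subscribe": true
+}
+```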
diff --git a/site/versioned_docs/version-4.5/developers/clustering/things-worth-knowing.md b/site/versioned_docs/version-4.5/developers/clustering/things-worth-knowing.md
new file mode 100644
index 00000000..02c188ae
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/clustering/things-worth-knowing.md
@@ -0,0 +1,43 @@
+---
+title: Things Worth Knowing
+---
+
+# Things Worth Knowing
+
+Additional information that will help you define your clustering topology.
+
+***
+
+### Transactions
+
+Transactions that are replicated across the cluster are:
+
+* Insert
+* Update
+* Upsert
+* Delete
+* Bulk loads
+  * CSV data load
+  * CSV file load
+  * CSV URL load
+  * Import from S3
+
+When adding or updating a node, any databases and tables in the subscription that don’t exist on the remote node will be automatically created.
+
+**Destructive database operations do not replicate across a cluster**. Those operations include `drop_database`, `drop_table`, and `drop_attribute`. If the desired outcome is to drop database information from other nodes, the operation(s) will need to be run on each node independently.
+
+Users and roles are not replicated across the cluster.
+
+***
+
+### Queueing
+
+Harper has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catch-up routine is executed to ensure that data that was missed, specific to the subscription, is sent/received as defined.
+
+***
+
+### Topologies
+
+Harper clustering creates a mesh network between nodes, giving end users the ability to create an infinite number of topologies. Subscription topologies can be as simple or as complex as needed.
+
+![](/img/v4.5/clustering/figure6.png)
diff --git a/site/versioned_docs/version-4.5/developers/components/built-in.md b/site/versioned_docs/version-4.5/developers/components/built-in.md
new file mode 100644
index 00000000..5e25a652
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/components/built-in.md
@@ -0,0 +1,153 @@
+---
+title: Built-In Components
+---
+
+# Built-In Components
+
+Harper provides extended features using built-in components. They do **not** need to be installed with a package manager; they simply must be specified in a config to run. These are used throughout many Harper docs, guides, and examples. Unlike external components, which have their own semantic versions, built-in components follow Harper's semantic version.
+
+- [Built-In Components](#built-in-components)
+  - [fastifyRoutes](#fastifyroutes)
+  - [graphql](#graphql)
+  - [graphqlSchema](#graphqlschema)
+  - [jsResource](#jsresource)
+  - [loadEnv](#loadenv)
+  - [rest](#rest)
+  - [roles](#roles)
+  - [static](#static)
+
+## fastifyRoutes
+
+Specify custom endpoints using [Fastify](https://fastify.dev/).
+
+This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+
+Complete documentation for this feature is available here: [Define Fastify Routes](../applications/define-routes)
+
+```yaml
+fastifyRoutes:
+  files: './routes/*.js'
+```
+
+## graphql
+
+> GraphQL querying is **experimental**, and only partially implements the GraphQL Over HTTP / GraphQL specifications.
+
+Enables GraphQL querying via a `/graphql` endpoint loosely implementing the GraphQL Over HTTP specification.
+
+Complete documentation for this feature is available here: [GraphQL](../../technical-details/reference/graphql)
+
+```yaml
+graphql: true
+```
+
+## graphqlSchema
+
+Specify schemas for Harper tables and resources via GraphQL schema syntax.
+
+This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+
+Complete documentation for this feature is available here: [Defining Schemas](../applications/defining-schemas)
+
+```yaml
+graphqlSchema:
+  files: './schemas.graphql'
+```
+
+## jsResource
+
+Specify custom, JavaScript-based Harper resources.
+
+Refer to the Application [Custom Functionality with JavaScript](../applications/#custom-functionality-with-javascript) guide, or the [Resource Class](../../technical-details/reference/resource) reference documentation, for more information on custom resources.
+
+This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+
+```yaml
+jsResource:
+  files: './resource.js'
+```
+
+## loadEnv
+
+Load environment variables via files like `.env`.
+
+This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+
+Ensure this component is specified first in `config.yaml` so that environment variables are loaded prior to loading any other components.
+
+```yaml
+loadEnv:
+  files: '.env'
+```
+
+This component matches the default behavior of dotenv, where existing variables take precedence. Specify the `override` option in order to override existing environment variables assigned to `process.env`:
+
+```yaml
+loadEnv:
+  files: '.env'
+  override: true
+```
+
+> Important: Harper is a single-process application. Environment variables are loaded onto `process.env` and will be shared throughout all Harper components. This means environment variables loaded by one component will be available to other components (as long as the components are loaded in the correct order).
+
+## rest
+
+Enable automatic REST endpoint generation for exported resources with this component.
+
+Complete documentation for this feature is available here: [REST](../rest)
+
+```yaml
+rest: true
+```
+
+This component has additional options.
+
+To enable `Last-Modified` header support:
+
+```yaml
+rest:
+  lastModified: true
+```
+
+To disable automatic WebSocket support:
+
+```yaml
+rest:
+  webSocket: false
+```
+
+## roles
+
+Specify roles for Harper tables and resources.
+
+This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+
+Complete documentation for this feature is available here: [Defining Roles](../applications/defining-roles)
+
+```yaml
+roles:
+  files: './roles.yaml'
+```
+
+## static
+
+Specify which files to serve statically from the Harper HTTP endpoint. Built using the [send](https://www.npmjs.com/package/send) and [serve-static](https://www.npmjs.com/package/serve-static) modules.
+
+This component is a [Resource Extension](./reference#resource-extension) and can be configured with the [`files`, `path`, and `root`](./reference#resource-extension-configuration) configuration options.
+
+```yaml
+static:
+  files: './web/*'
+```
diff --git a/site/versioned_docs/version-4.5/developers/components/index.md b/site/versioned_docs/version-4.5/developers/components/index.md
new file mode 100644
index 00000000..6d479c95
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/components/index.md
@@ -0,0 +1,26 @@
+---
+title: Components
+---
+
+# Components
+
+Harper components are a core Harper concept: flexible, JavaScript-based _extensions_ of the highly extensible core Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](../../technical-details/reference/globals) (such as `Resource`, `databases`, and `tables`).
+
+A key aspect of components is their extensibility; components can be built on other components. For example, a [Harper Application](../applications/) is a component that uses many other components. The [application template](https://github.com/HarperDB/application-template) demonstrates many of Harper's built-in components, such as `rest` (for automatic REST endpoint generation), `graphqlSchema` (for table schema definitions), and many more.
+
+From management to development, the following pages document everything a developer needs to know about Harper components.
+
+- [Managing Components](./managing) - developing, installing, deploying, and executing Harper components locally and remotely
+- [Technical Reference](./reference) - detailed, technical reference for component development
+- [Built-In Components](./built-in) - documentation for all of Harper's built-in components (i.e. `rest`)
+
+## Custom Components
+
+The following list is all of Harper's officially maintained custom components. They are all available on npm and GitHub.
+
+- [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs)
+- [`@harperdb/apollo`](https://github.com/HarperDB/apollo)
+- [`@harperdb/status-check`](https://github.com/HarperDB/status-check)
+- [`@harperdb/prometheus-exporter`](https://github.com/HarperDB/prometheus-exporter)
+- [`@harperdb/acl-connect`](https://github.com/HarperDB/acl-connect)
+- [`@harperdb/astro`](https://github.com/HarperDB/astro)
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/components/managing.md b/site/versioned_docs/version-4.5/developers/components/managing.md
new file mode 100644
index 00000000..31155ed3
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/components/managing.md
@@ -0,0 +1,179 @@
+---
+title: Managing
+---
+
+# Managing
+
+Harper offers several approaches to managing components that differ between local development and Harper-managed instances. This page covers the recommended methods of developing, installing, deploying, and running Harper components.
+
+## Local Development
+
+Harper is designed to be simple to run locally. Generally, Harper should be installed locally on a machine using a global package manager install (e.g. `npm i -g harperdb`).
+
+> Before continuing, ensure Harper is installed and the `harperdb` CLI is available. For more information, review the [installation guide](../../deployments/install-harper/).
+
+When developing a component locally there are a number of ways to run it on Harper.
+
+### `dev` and `run` commands
+
+The quickest way to run a component is by using the `dev` command within the component directory.
+
+The `harperdb dev .` command will automatically watch for file changes within the component directory and restart the Harper threads when changes are detected.
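+
+For example (assuming the component lives in the current directory):
+
+```sh
+# watch for file changes and hot-restart the Harper threads
+harperdb dev .
+
+# or run the component without file watching
+harperdb run .
+```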
+ +The `dev` command will **not** restart the main thread; if this is a requirement, switch to using `run` instead and manually start/stop the process to execute the main thread. + +Stop execution for either of these processes by sending a SIGINT (generally CTRL/CMD+C) signal to the process. + +### Deploying to a local Harper instance + +Alternatively, to mimic interfacing with a hosted Harper instance, use operation commands instead. + +1. Start up Harper with `harperdb` +1. _Deploy_ the component to the local instance by executing: + + ```sh + harperdb deploy_component \ + project=<project-name> \ + package=<path-to-component> \ + restart=true + ``` + + * Make sure to omit the `target` option so that it _deploys_ to the Harper instance running locally + * The `package=<path-to-component>` option creates a symlink to the component, simplifying restarts + * By default, the `deploy_component` operation command will _deploy_ the current directory by packaging it up and streaming the bytes. By specifying `package`, it skips this and references the file path directly + * The `restart=true` option automatically restarts Harper threads after the component is deployed + * If set to `'rolling'`, a rolling restart will be triggered after the component is deployed +1. In another terminal, use the `harperdb restart` command to restart the instance's threads at any time + * With `package=<path-to-component>`, the component source is symlinked so changes will automatically be picked up between restarts + * If `package` was omitted, run the `deploy_component` command again with any new changes +1. To remove the component use `harperdb drop_component project=<project-name>` + +Similar to the previous section, if the main thread needs to be restarted, start and stop the Harper instance manually (with the component deployed). Upon Harper startup, the component will automatically be loaded and executed across all threads. + +> Not all [component operations](../operations-api/components) are available via CLI. When in doubt, switch to using the Operations API via network requests to the local Harper instance. + +For example, to properly _deploy_ a `test-component` locally, the command would look like: + +```sh +harperdb deploy_component \ + project=test-component \ + package=/Users/dev/test-component \ + restart=true +``` + +> If the current directory is the component directory, use a shortcut such as `package=$(pwd)` to avoid typing out the complete path. + +## Remote Management + +Managing components on a remote Harper instance is best accomplished through [component operations](../operations-api/components), similar to using the `deploy_component` command locally. Before continuing, always back up critical Harper instances; managing, deploying, and executing components can directly impact a live system. + +Remote Harper instances work very similarly to local Harper instances. The primary component management operations still include `deploy_component`, `drop_component`, and `restart`. + +The key to remote management is specifying a remote `target` along with appropriate username/password values. These can all be specified using CLI arguments: `target`, `username`, and `password`. Alternatively, the `CLI_TARGET_USERNAME` and `CLI_TARGET_PASSWORD` environment variables can replace the `username` and `password` arguments.
+ +All together: + +```sh +harperdb deploy_component \ + project=<project-name> \ + package=<package-specifier> \ + username=<username> \ + password=<password> \ + target=<target-instance-url> \ + restart=true \ + replicated=true +``` + +Or, using environment variables: + +```sh +export CLI_TARGET_USERNAME=<username> +export CLI_TARGET_PASSWORD=<password> +harperdb deploy_component \ + project=<project-name> \ + package=<package-specifier> \ + target=<target-instance-url> \ + restart=true \ + replicated=true +``` + +Unlike local development, where `package` should be set to a local file path for symlinking and improved development experience purposes, here it has some additional options. + +A local component can be deployed to a remote instance by **omitting** the `package` field. Harper will automatically package the local directory and include that along with the rest of the deployment operation. + +Furthermore, the `package` field can be set to any valid [npm dependency value](https://docs.npmjs.com/cli/v11/configuring-npm/package-json#dependencies). + +* For components deployed to npm, specify the package name: `package="@harperdb/status-check"` +* For components on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check` +* Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-component.git"` + * Reference the [SSH Key](../operations-api/components#add-ssh-key) operations for more information on managing SSH keys on a remote instance +* Even tarball URLs are supported: `package="https://example.com/component.tar.gz"` + +> When using git tags, we highly recommend that you use the semver directive to ensure consistent and reliable installation by npm. In addition to tags, you can also reference branches or commit numbers. + +These `package` values are all supported because behind the scenes, Harper generates a `package.json` file for the components. Then, it uses a form of `npm install` to resolve them as dependencies. This is why symlinks are generated when specifying a file path locally. The following [Advanced](./managing#advanced) section explores this pattern in more detail. + +Finally, don't forget to include `restart=true`, or run `harperdb restart target=<target-instance-url>`. + +## Advanced + +The following methods are advanced and should be executed with caution as they can have unintended side effects. Always back up any critical Harper instances before continuing. + +First, locate the Harper installation `rootPath` directory. Generally, this is `~/hdb`. It can be retrieved by running `harperdb get_configuration` and looking for the `rootPath` field. + +> For a useful shortcut on POSIX compliant machines run: `harperdb get_configuration json=true | jq ".rootPath" | sed 's/"//g'` + +This path is the Harper instance. Within this directory, locate the root config titled `harperdb-config.yaml`, and the components root path. The components root path will be `<rootPath>/components` by default (thus, `~/hdb/components`), but it can also be configured. If necessary, use `harperdb get_configuration` again and look for the `componentsRoot` field for the exact path. + +### Adding components to root + +Similar to how components can specify other components within their `config.yaml`, components can be added to Harper by adding them to the `harperdb-config.yaml`. + +The configuration is very similar to that of `config.yaml`. Entries are comprised of a top-level `<name>:` key and an indented `package: <specifier>` field. Any additional component options can also be included as indented fields.
+ +```yaml +status-check: + package: "@harperdb/status-check" +``` + +The key difference between this and a component's `config.yaml` is that the name does **not** need to be associated with a `package.json` dependency. When Harper starts up, it transforms these configurations into a `package.json` file, and then executes a form of `npm install`. Thus, the `package: <specifier>` field can use any valid dependency syntax; npm packages, GitHub repos, tarballs, and local directories are all supported. + +Given a root config like: + +```yaml +myGithubComponent: + package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub +myNPMComponent: + package: harperdb # install from npm +myTarBall: + package: /Users/harper/cool-component.tar # install from tarball +myLocal: + package: /Users/harper/local # install from local path +myWebsite: + package: https://harperdb-component # install from URL +``` + +Harper will generate a `package.json` like: + +```json +{ + "dependencies": { + "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0", + "myNPMComponent": "npm:harperdb", + "myTarBall": "file:/Users/harper/cool-component.tar", + "myLocal": "file:/Users/harper/local", + "myWebsite": "https://harperdb-component" + } +} +``` + +npm will install all the components and store them in `<rootPath>/node_modules`. A symlink back to `<componentsRoot>/node_modules` is also created for dependency resolution purposes. + +The package prefix is automatically added; however, you can manually set it in your package reference. + +```yaml +myCoolComponent: + package: file:/Users/harper/cool-component.tar +``` + +By specifying a file path, npm will generate a symlink, and changes will then be automatically picked up between restarts. diff --git a/site/versioned_docs/version-4.5/developers/components/reference.md b/site/versioned_docs/version-4.5/developers/components/reference.md new file mode 100644 index 00000000..22d55063 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/components/reference.md @@ -0,0 +1,251 @@ +--- +title: Component Reference +--- + +# Component Reference + +The technical definition of a Harper component is fairly loose. In its absolute simplest form, a component is any JavaScript module that is compatible with the [default component configuration](#default-component-configuration). For example, a module with a singular `resources.js` file is technically a valid component. + +Harper provides many features as _built-in components_; these can be used directly without installing any other dependencies. + +Other features are provided by _custom components_. These can be npm packages such as [@harperdb/nextjs](https://github.com/HarperDB/nextjs) and [@harperdb/apollo](https://github.com/HarperDB/apollo) (which are maintained by Harper), or something maintained by the community. Custom components follow the same configuration rules and use the same APIs that Harper's built-in components do. The only difference is that they must be a part of the component's dependencies. + +> Documentation is available for all [built-in](./built-in) and [custom](./#custom-components) Harper components. + +## Component Configuration + +Harper components are configured with a `config.yaml` file located in the root of the component module directory. This file is how a component configures other components it depends on. Each entry in the file starts with a component name, and then configuration values are indented below it. + +```yaml +name: + option-1: value + option-2: value +``` + +It is the entry's `name` that is used for component resolution.
It can be one of the [built-in components](./built-in), or it must match a package dependency of the component as specified by `package.json`. The [Custom Component Configuration](#custom-component-configuration) section provides more details and examples. + +Some built-in components can be configured with as little as a top-level boolean; for example, the [rest](./built-in#rest) component can be enabled with just: + +```yaml +rest: true +``` + +Other components (built-in or custom) will generally have more configuration options. Some options are ubiquitous to the Harper platform, such as the `files`, `path`, and `root` options for a [Resource Extension](#resource-extension-configuration), or `package` for a [custom component](#custom-component-configuration). Additionally, [custom options](#protocol-extension-configuration) can be defined for [Protocol Extensions](#protocol-extension). + +### Custom Component Configuration + +Any custom component **must** be configured with the `package` option in order for Harper to load that component. The name of the package must match a dependency of the component. For example, to use the `@harperdb/nextjs` extension, it must first be included in `package.json`: + +```json +{ + "dependencies": { + "@harperdb/nextjs": "^1.0.0" + } +} +``` + +Then, within `config.yaml` it can be enabled and configured using: + +```yaml +'@harperdb/nextjs': + package: '@harperdb/nextjs' + # ... +``` + +Since npm allows for a [variety of dependency configurations](https://docs.npmjs.com/cli/configuring-npm/package-json#dependencies), this can be used to create custom references. For example, to depend on a specific GitHub branch, first update the `package.json`: + +```json +{ + "dependencies": { + "harper-nextjs-test-feature": "HarperDB/nextjs#test-feature" + } +} +``` + +And now in `config.yaml`: + +```yaml +harper-nextjs-test-feature: + package: '@harperdb/nextjs' + files: '/*' + # ... +``` + +### Default Component Configuration + +Harper components do not need to specify a `config.yaml`. Harper uses the following default configuration to load components. + +```yaml +rest: true +graphql: true +graphqlSchema: + files: '*.graphql' +roles: + files: 'roles.yaml' +jsResource: + files: 'resources.js' +fastifyRoutes: + files: 'routes/*.js' + path: '.' +static: + files: 'web/**' +``` + +Refer to the [built-in components](./built-in) documentation for more information on these fields. + +If a `config.yaml` is defined, it will **not** be merged with the default config. + +## Extensions + +A Harper Extension is an extensible component that is intended to be used by other components. The built-in components [graphqlSchema](./built-in#graphqlschema) and [jsResource](./built-in#jsresource) are both examples of extensions. + +There are two key types of Harper Extensions: **Resource Extensions** and **Protocol Extensions**. The key difference is that a **Protocol Extension** can return a **Resource Extension**. + +Functionally, what makes an extension a component is the contents of `config.yaml`. Unlike the Application Template referenced earlier, which specified multiple components within the `config.yaml`, an extension will specify an `extensionModule` option. + +- **extensionModule** - `string` - _required_ - A path to the extension module source code. The path must resolve from the root of the extension module directory. + +For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) `config.yaml` specifies `extensionModule: ./extension.js`.
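+ +A minimal sketch of such an extension's `config.yaml` (the path is illustrative): + +```yaml +extensionModule: ./extension.js +```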
+ +If the extension is being written in something other than JavaScript (such as TypeScript), ensure that the path resolves to the built version (e.g. `extensionModule: ./dist/index.js`). + +It is also recommended that all extensions have a `package.json` that specifies JavaScript package metadata such as name, version, type, etc. Since extensions are just JavaScript packages, they can do anything a JavaScript package can normally do. They can be written in TypeScript and compiled to JavaScript. They can export an executable (using the [bin](https://docs.npmjs.com/cli/configuring-npm/package-json#bin) property). They can be published to npm. The possibilities are endless! + +Furthermore, what defines an extension separately from a component is that it leverages any of the [Resource Extension](#resource-extension-api) or [Protocol Extension](#protocol-extension-api) APIs. The key is in the name: **extensions are extensible**. + +### Resource Extension + +A Resource Extension is for processing a certain type of file or directory. For example, the built-in [jsResource](./built-in#jsresource) extension handles executing JavaScript files. + +Resource Extensions are composed of four distinct function exports: [`handleFile()`](#handlefilecontents-urlpath-path-resources-void--promisevoid), [`handleDirectory()`](#handledirectoryurlpath-path-resources-boolean--void--promiseboolean--void), [`setupFile()`](#setupfilecontents-urlpath-path-resources-void--promisevoid), and [`setupDirectory()`](#setupdirectoryurlpath-path-resources-boolean--void--promiseboolean--void). The `handleFile()` and `handleDirectory()` methods are executed on **all worker threads**, and are _executed again during restarts_. The `setupFile()` and `setupDirectory()` methods are only executed **once** on the **main thread** during the initial system start sequence. + +> Keep in mind that the CLI command `harperdb restart` or CLI argument `restart=true` only restarts the worker threads. If a component is deployed using `harperdb deploy`, the code within the `setupFile()` and `setupDirectory()` methods will not be executed until the system is completely shut down and turned back on. + +Other than their execution behavior, the `handleFile()` and `setupFile()` methods, and the `handleDirectory()` and `setupDirectory()` methods, have identical function definitions (arguments and return value behavior). + +#### Resource Extension Configuration + +Any [Resource Extension](#resource-extension) can be configured with the `files`, `path`, and `root` options. These options control how _files_ and _directories_ are resolved in order to be passed to the extension's `handleFile()`, `setupFile()`, `handleDirectory()`, and `setupDirectory()` methods. + +- **files** - `string` - *required* - Specifies the set of files and directories that should be handled by the component. Can be a glob pattern. +- **path** - `string` - *optional* - Specifies the URL path to be handled by the component. +- **root** - `string` - *optional* - Specifies the root directory for mapping file paths to URLs.
+ +For example, to configure the [static](./built-in#static) component to serve all files from `web` at the root URL path: + +```yaml +static: + files: 'web/**' + root: 'web' +``` + +Or, to configure the [graphqlSchema](./built-in#graphqlschema) component to load all schemas within the `src/schema` directory: + +```yaml +graphqlSchema: + files: 'src/schema/*.schema' +``` + +#### Resource Extension API + +In order for an extension to be classified as a Resource Extension it must implement at least one of the `handleFile()`, `handleDirectory()`, `setupFile()`, or `setupDirectory()` methods. As a standalone extension, these methods should be named and exported directly. For example: + +```js +// ESM +export function handleFile() {} +export function setupDirectory() {} + +// or CJS +function handleDirectory() {} +function setupFile() {} + +module.exports = { handleDirectory, setupFile } +``` + +When returned by a [Protocol Extension](#protocol-extension), these methods should be defined on the object instead: + +```js +export function start() { + return { + handleFile () {} + } +} +``` + +##### `handleFile(contents, urlPath, path, resources): void | Promise<void>` +##### `setupFile(contents, urlPath, path, resources): void | Promise<void>` + +These methods are for processing individual files. They can be async. + +> Remember! +> +> `setupFile()` is executed **once** on the **main thread** during the main start sequence. +> +> `handleFile()` is executed on **worker threads** and is executed again during restarts. + +Parameters: + +- **contents** - `Buffer` - The contents of the file +- **urlPath** - `string` - The recommended URL path of the file +- **path** - `string` - The relative path of the file +- **resources** - `Object` - A collection of the currently loaded resources + +Returns: `void | Promise<void>` + +##### `handleDirectory(urlPath, path, resources): boolean | void | Promise<boolean | void>` +##### `setupDirectory(urlPath, path, resources): boolean | void | Promise<boolean | void>` + +These methods are for processing directories. They can be async. + +If the function returns or resolves a truthy value, then the component loading sequence will end and no other entries within the directory will be processed. + +> Remember! +> +> `setupDirectory()` is executed **once** on the **main thread** during the main start sequence. +> +> `handleDirectory()` is executed on **worker threads** and is executed again during restarts. + +Parameters: + +- **urlPath** - `string` - The recommended URL path of the file +- **path** - `string` - The relative path of the directory +- **resources** - `Object` - A collection of the currently loaded resources + +Returns: `boolean | void | Promise<boolean | void>` + +### Protocol Extension + +A Protocol Extension is a more advanced form of a Resource Extension and is mainly used for implementing higher level protocols. For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) handles building and running a Next.js project. A Protocol Extension is particularly useful for adding custom networking handlers (see the [`server`](../../technical-details/reference/globals#server) global API documentation for more information). + +#### Protocol Extension Configuration + +In addition to the `files`, `path`, and `root` [Resource Extension configuration](#resource-extension-configuration) options, and the `package` [Custom Component configuration](#custom-component-configuration) option, Protocol Extensions can also specify additional configuration options.
Any options added to the extension configuration (in `config.yaml`) will be passed through to the `options` object of the `start()` and `startOnMainThread()` methods. + +For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs#options) specifies multiple options that can be included in its configuration. A Next.js app using `@harperdb/nextjs` may specify the following `config.yaml`: + +```yaml +'@harperdb/nextjs': + package: '@harperdb/nextjs' + files: '/*' + prebuilt: true + dev: false +``` + +Many protocol extensions will use the `port` and `securePort` options for configuring networking handlers. Many of the [`server`](../../technical-details/reference/globals#server) global APIs accept `port` and `securePort` options, so components replicate this for simpler pass-through. + +#### Protocol Extension API + +A Protocol Extension is made up of two distinct methods, [`start()`](#startoptions-resourceextension--promiseresourceextension) and [`startOnMainThread()`](#startonmainthreadoptions-resourceextension--promiseresourceextension). Similar to a Resource Extension, the `start()` method is executed on _all worker threads_, and _executed again on restarts_. The `startOnMainThread()` method is **only** executed **once** during the initial system start sequence. These methods take an identical `options` object parameter, and can both return a Resource Extension (i.e. an object containing one or more of the methods listed above). + +##### `start(options): ResourceExtension | Promise<ResourceExtension>` +##### `startOnMainThread(options): ResourceExtension | Promise<ResourceExtension>` + +Parameters: + +- **options** - `Object` - An object representation of the extension's configuration options. + +Returns: `Object` - An object that implements any of the [Resource Extension APIs](#resource-extension-api)
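+ +As a hedged sketch (not any particular extension's actual source), a minimal Protocol Extension module might look like the following, assuming the `server.http()` middleware form described in the globals reference; the option names are illustrative: + +```js +// extension.js - executed on every worker thread +export function start(options) { + // options carries this extension's entries from config.yaml + server.http( + (request, nextLayer) => { + // hypothetical pass-through middleware + return nextLayer(request); + }, + { port: options.port } + ); + + // optionally also act as a Resource Extension + return { + handleFile(contents, urlPath, path, resources) { + // process matched files here + }, + }; +} +```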
diff --git a/site/versioned_docs/version-4.5/developers/miscellaneous/google-data-studio.md b/site/versioned_docs/version-4.5/developers/miscellaneous/google-data-studio.md new file mode 100644 index 00000000..47fd80bd --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/miscellaneous/google-data-studio.md @@ -0,0 +1,37 @@ +--- +title: Google Data Studio +--- + +# Google Data Studio + +[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The Harper Google Data Studio connector seamlessly integrates your Harper data with Google Data Studio so you can build custom, real-time data visualizations. + +The Harper Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/). + +## Requirements + +The Harper database must be accessible through the Internet in order for Google Data Studio servers to access it. The database may be hosted by you or via [Harper Cloud](../../deployments/harper-cloud/). + +## Get Started + +Get started by selecting the Harper connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create). + +1. Log in to https://datastudio.google.com/. +1. Add a new Data Source using the Harper connector. The current release version can be added as a data source by following this link: [Harper Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60_VJeO3BhHJPvdd20-S76g). +1. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database). +1. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it. +1. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer. +1. Check the box for “Allow Bad Certs” if your Harper instance does not have a valid SSL certificate. [Harper Cloud](../../deployments/harper-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using [Harper Cloud](../../deployments/harper-cloud/) or another instance you know should always have valid SSL certificates, do not check this box. +1. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button. + * Table will ask you for a Schema and a Table to return all fields of using `SELECT *`. + * SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together and use Harper-specific SQL functions, along with the usual power that SQL grants. +1. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point. If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue. +1. If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as creating new fields from the report view that do calculations on other fields. + +## Considerations + +* Both Postman and the [Harper Studio](../../deployments/harper-cloud/) app have ways to convert a user:password pair to a Basic Auth token (see the sketch after this list). Use either to create the token for the connector’s user. + * You may sign out of your current user by going to the instances tab in Harper Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab. +* It’s highly recommended that you create a read-only user role in Harper Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it. +* The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set. +* _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake\_case gets around this.
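+ +As a minimal sketch, the Basic Auth token is just the base64 encoding of `user:password` (the credentials below are illustrative; assumes a POSIX shell with `base64`): + +```sh +# base64-encode "user:password" to produce the Basic Auth token +echo -n 'data_studio_reader:example-password' | base64 +```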
diff --git a/site/versioned_docs/version-4.5/developers/miscellaneous/index.md b/site/versioned_docs/version-4.5/developers/miscellaneous/index.md new file mode 100644 index 00000000..13ee450a --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/miscellaneous/index.md @@ -0,0 +1,7 @@ +--- +title: Miscellaneous +--- + +# Miscellaneous + +This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/developers/miscellaneous/query-optimization.md b/site/versioned_docs/version-4.5/developers/miscellaneous/query-optimization.md new file mode 100644 index 00000000..4a2dbc6c --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/miscellaneous/query-optimization.md @@ -0,0 +1,37 @@ +--- +title: Query Optimization +--- + +# Query Optimization + +Harper has powerful query functionality with excellent performance characteristics. However, like any database, different queries can vary significantly in performance. It is important to understand how querying works to help you optimize your queries for the best performance. + +### Query Execution + +At a fundamental level, querying involves defining conditions to find matching data, executing those conditions against the database, and delivering the results based on required fields, relationships, and ordering. Harper supports indexed fields, and these indexes are used to speed up query execution. When conditions are specified in a query, Harper will attempt to utilize indexes to optimize the speed of query execution. When a field is not indexed and a query specifies a condition on that field, the database must check each potential record to determine if it matches the condition. + +When a query is performed with multiple conditions, Harper will attempt to optimize the ordering of these conditions. When using intersecting conditions (the default `and` operator, where matching records must match all conditions), Harper will attempt to apply the most selective and performant condition first. This means that if one condition can use an index and is more selective than another, it will be used first to find the initial matching set of data and then filter based on the remaining conditions. If a condition can search an indexed field with a selective condition, it will be used before conditions that aren't indexed or aren't as selective. The `search` method includes an `explain` flag that can be used to return the query execution order to understand how the query is being executed. This can be useful for debugging and optimizing queries. + +For a union query, each condition is executed separately and the results are combined/merged. + +### Conditions, Operators, and Indexing + +When a query is performed, the conditions specified in the query are evaluated against the data in the database. The conditions can be simple or complex, and can include scalar operators such as `=`, `!=`, `>`, `<`, `>=`, `<=`, as well as `starts_with`, `contains`, and `ends_with`. The use of these operators can affect the performance of the query, especially when used with indexed fields. If an indexed field is not used, the database will have to check each potential record to determine if it matches the condition. If the only condition is not indexed, or there are no conditions on an indexed field, the database will have to check every record with a full table scan, which can be very slow for large datasets (and will get slower as the dataset grows, `O(n)`).
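+ +As a hedged sketch of how condition choice interacts with indexes (the `Product` table and its attributes are hypothetical; the condition shape follows the Resource reference conventions): + +```js +// Assume `price` is indexed and `description` is not. With intersecting (and) +// conditions, Harper should apply the selective indexed condition first, then +// filter the narrowed set on the unindexed attribute. +const results = tables.Product.search({ + conditions: [ + { attribute: 'description', comparator: 'contains', value: 'wireless' }, // unindexed on its own: full scan + { attribute: 'price', comparator: 'less_than', value: 100 }, // indexed: fast initial match + ], + explain: true, // return the planned execution order instead of the records +}); +```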
+ +The use of indexed fields can significantly improve the performance of a query, providing fast performance even as the database grows in size (`O(log n)`). However, indexed fields require extra writes to the database when performing insert, update, or delete operations. This is because the index must be updated to reflect the changes in the data. This can slow down write operations, but the trade-off is often worth it if the field is frequently used in queries. + +The different operators can also affect the performance of a query. For example, using the `=` operator on an indexed field is generally faster than using the `!=` operator, as the latter requires checking all records that do not match the condition. An index is a sorted list of values, so the greater-than and less-than operators will also utilize indexed fields when possible. If the range is narrow, these operations can be very fast. A wide range could yield a large number of records and will naturally incur more overhead. The `starts_with` operator can also leverage indexed fields because it can quickly find the matching entries in the sorted index. On the other hand, the `contains`, `ends_with`, and not-equal (`!=` or `not_equal`) operators cannot leverage indexes, so they will require a full table scan to find the matching records if they are not used in conjunction with a selective/indexed condition. There is a special case of `!= null`, which can use indexes to find non-null records; however, this is generally only helpful for sparse fields where only a small subset of records have non-null values. More generally, operators are more efficient if they are selecting on fields with high cardinality. + +Conditions can be applied to primary key fields or other indexed fields (known as secondary indexes). In general, querying on a primary key will be faster than querying on a secondary index, as the primary key is the most efficient way to access data in the database, and doesn't require cross-referencing to the main records. + +### Relationships/Joins + +Harper supports relationships between tables, allowing for "join" queries. These do result in more complex queries with potentially larger performance overhead, as more lookups are necessary to connect matched or selected data with other tables. Similar principles apply to conditions which use relationships: indexed fields and comparators that leverage the ordering are still valuable for performance. It is also important that when a condition on one table is connected to another table through a foreign key, that foreign key is indexed. Likewise, if a query `select`s data from a related table through a foreign key, that key should be indexed as well. The same principle of higher cardinality applies here too: more unique values allow for efficient lookups. + +### Sorting + +Queries can also specify a sort order, and this can also significantly impact performance. If a query specifies a sort order on an indexed field, the database can use the index to quickly retrieve the data in the specified order. A sort order used in conjunction with a condition on the same (indexed) field can utilize the index for ordering.
However, if the sort order is not on an indexed field, or the query specifies conditions on different fields, Harper will generally need to sort the data after retrieving it, which can be slow for large datasets. The same principles apply to sorting as they do to conditions: sorting on a primary key is generally faster than sorting on a secondary index, if the condition aligns with the sort order. + +### Streaming + +One of the unique and powerful features of Harper's querying functionality is the ability to stream query results. When possible, Harper can return records from a query as they are found, rather than waiting for the entire query to complete. This can significantly improve performance for large queries, as it allows the application to start processing results or sending the initial data before the entire query is complete (improving time-to-first-byte speed, for example). However, using a sort order on a query with conditions that are not on an aligned index requires that the entire query result be loaded in order to perform the sorting, which defeats the streaming benefits. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/developers/miscellaneous/sdks.md b/site/versioned_docs/version-4.5/developers/miscellaneous/sdks.md new file mode 100644 index 00000000..13998f80 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/miscellaneous/sdks.md @@ -0,0 +1,22 @@ +--- +title: SDKs +description: >- + Software Development Kits available for connecting to Harper from different + languages. +--- + +# SDKs + +| SDK/Tool | Description | Installation | +| --- | --- | --- | +| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A .NET Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` | +| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A JavaScript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` | +| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use Harper as the data source for a Gatsby project at build time | `npm i -s gatsby-source-harperdb` | +| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The Harper EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` | +| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of Harper API functions with wrappers for an object-oriented interface | `pip3 install harperdb` | +| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A Harper SDK for Flutter | `flutter pub add harperdb` | +| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` | +| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to Harper using the Node-Red platform | `npm i -s node-red-contrib-harperdb` | +| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A Harper SDK for NodeJS | `npm i -s harperive` | +| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A Harper SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` | +| [HarperDB Go SDK](https://github.com/HarperDB-Add-Ons/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB-Add-Ons/sdk-go` |
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/advanced-json-sql-examples.md b/site/versioned_docs/version-4.5/developers/operations-api/advanced-json-sql-examples.md new file mode 100644 index 00000000..61c26f47 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/advanced-json-sql-examples.md @@ -0,0 +1,1780 @@ +--- +title: Advanced JSON SQL Examples +--- + +# Advanced JSON SQL Examples + +## Create movies database +Create a new database called "movies" using the 'create_database' operation. + +_Note: Creating a database is optional; if one is not created, Harper will default to using a database named `data`._ + +### Body +```json +{ + "operation": "create_database", + "database": "movies" +} +``` + +### Response: 200 +```json +{ + "message": "database 'movies' successfully created" +} +``` + +--- + +## Create movie Table +Creates a new table called "movie" inside the database "movies" using the 'create_table' operation. + +### Body + +```json +{ + "operation": "create_table", + "database": "movies", + "table": "movie", + "primary_key": "id" +} +``` + +### Response: 200 +```json +{ + "message": "table 'movies.movie' successfully created." +} +``` + +--- + +## Create credits Table +Creates a new table called "credits" inside the database "movies" using the 'create_table' operation. + +### Body + +```json +{ + "operation": "create_table", + "database": "movies", + "table": "credits", + "primary_key": "movie_id" +} +``` + +### Response: 200 +```json +{ + "message": "table 'movies.credits' successfully created." +} +``` + +--- + +## Bulk Insert movie Via CSV +Inserts data from a hosted CSV file into the "movie" table using the 'csv_url_load' operation. + +### Body + +```json +{ + "operation": "csv_url_load", + "database": "movies", + "table": "movie", + "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c" +} +``` + +--- + +## Bulk Insert credits Via CSV +Inserts data from a hosted CSV file into the "credits" table using the 'csv_url_load' operation. + +### Body + +```json +{ + "operation": "csv_url_load", + "database": "movies", + "table": "credits", + "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c", + "job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c" +} +``` + +--- + +## View raw data +In the following example we will be running expressions on the keywords & production_companies attributes, so for context we are displaying what the raw data looks like.
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + { + "id": 305, + "name": "moon" + }, + { + "id": 697, + "name": "loss of loved one" + }, + { + "id": 839, + "name": "planet mars" + }, + { + "id": 14626, + "name": "astronaut" + }, + { + "id": 157265, + "name": "moon colony" + }, + { + "id": 162429, + "name": "solar system" + }, + { + "id": 240119, + "name": "father son relationship" + }, + { + "id": 244256, + "name": "near future" + }, + { + "id": 257878, + "name": "planet neptune" + }, + { + "id": 260089, + "name": "space walk" + } + ], + "production_companies": [ + { + "id": 490, + "name": "New Regency Productions", + "origin_country": "" + }, + { + "id": 79963, + "name": "Keep Your Head", + "origin_country": "" + }, + { + "id": 73492, + "name": "MadRiver Pictures", + "origin_country": "" + }, + { + "id": 81, + "name": "Plan B Entertainment", + "origin_country": "US" + }, + { + "id": 30666, + "name": "RT Features", + "origin_country": "BR" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + { + "id": 3070, + "name": "mercenary" + }, + { + "id": 4110, + "name": "mumbai (bombay), india" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 9730, + "name": "crime boss" + }, + { + "id": 11107, + "name": "rescue mission" + }, + { + "id": 18712, + "name": "based on graphic novel" + }, + { + "id": 265216, + "name": "dhaka (dacca), bangladesh" + } + ], + "production_companies": [ + { + "id": 106544, + "name": "AGBO", + "origin_country": "US" + }, + { + "id": 109172, + "name": "Thematic Entertainment", + "origin_country": "US" + }, + { + "id": 92029, + "name": "TGIM Films", + "origin_country": "US" + } + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + { + "id": 10873, + "name": "school" + } + ], + "production_companies": [] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + { + "id": 2651, + "name": "nanotechnology" + }, + { + "id": 9715, + "name": "superhero" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 164218, + "name": "psychotronic" + }, + { + "id": 255024, + "name": "shared universe" + }, + { + "id": 258575, + "name": "valiant comics" + } + ], + "production_companies": [ + { + "id": 34, + "name": "Sony Pictures", + "origin_country": "US" + }, + { + "id": 10246, + "name": "Cross Creek Pictures", + "origin_country": "US" + }, + { + "id": 6573, + "name": "Mimran Schur Pictures", + "origin_country": "US" + }, + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 103673, + "name": "The Hideaway Entertainment", + "origin_country": "US" + }, + { + "id": 124335, + "name": "Valiant Entertainment", + "origin_country": "US" + }, + { + "id": 5, + "name": "Columbia Pictures", + "origin_country": "US" + }, + { + "id": 1225, + "name": "One Race", + "origin_country": "US" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + } + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + { + "id": 818, + "name": "based on novel or book" + }, + { + "id": 4542, + "name": "gold rush" + }, + { + "id": 15162, + "name": "dog" + }, + { + "id": 155821, + "name": "sled dogs" + }, + { + "id": 189390, + "name": "yukon" + }, + { + "id": 207928, + "name": "19th century" + }, + { + "id": 259987, + "name": "cgi animation" + }, + { + "id": 263806, + "name": "1890s" + } + ], + "production_companies": [ + { + "id": 787, + "name": "3 Arts Entertainment", + "origin_country": "US" + }, + { + "id": 127928, + "name": "20th Century Studios", + "origin_country": "US" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + { + "id": 282, + "name": "video game" + }, + { + "id": 6054, + "name": "friendship" + }, + { + "id": 10842, + "name": "good vs evil" + }, + { + "id": 41645, + "name": "based on video game" + }, + { + "id": 167043, + "name": "road movie" + }, + { + "id": 172142, + "name": "farting" + }, + { + "id": 188933, + "name": "bar fight" + }, + { + "id": 226967, + "name": "amistad" + }, + { + "id": 245230, + "name": "live action remake" + }, + { + "id": 258111, + "name": "fantasy" + }, + { + "id": 260223, + "name": "videojuego" + } + ], + "production_companies": [ + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 10644, + "name": "Blur Studios", + "origin_country": "US" + }, + { + "id": 77884, + "name": "Marza Animation Planet", + "origin_country": "JP" + }, + { + "id": 4, + "name": "Paramount", + "origin_country": "US" + }, + { + "id": 113750, + "name": "SEGA", + "origin_country": "JP" + }, + { + "id": 100711, + "name": "DJ2 Entertainment", + "origin_country": "" + }, + { + "id": 24955, + "name": "Paramount Animation", + "origin_country": "US" + } + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + { + "id": 849, + "name": "dc comics" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 187056, + "name": "woman director" + }, + { + "id": 229266, + "name": "dc extended universe" + } + ], + "production_companies": [ + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": 
"US" + }, + { + "id": 82968, + "name": "LuckyChap Entertainment", + "origin_country": "GB" + }, + { + "id": 103462, + "name": "Kroll & Co Entertainment", + "origin_country": "US" + }, + { + "id": 174, + "name": "Warner Bros. Pictures", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + }, + { + "id": 128064, + "name": "DC Films", + "origin_country": "US" + }, + { + "id": 101831, + "name": "Clubhouse Pictures", + "origin_country": "US" + } + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + { + "id": 849, + "name": "dc comics" + } + ], + "production_companies": [ + { + "id": 2785, + "name": "Warner Bros. Animation", + "origin_country": "US" + }, + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + } + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + { + "id": 1353, + "name": "underground" + }, + { + "id": 5318, + "name": "seoul" + }, + { + "id": 5732, + "name": "birthday party" + }, + { + "id": 5752, + "name": "private lessons" + }, + { + "id": 9866, + "name": "basement" + }, + { + "id": 10453, + "name": "con artist" + }, + { + "id": 11935, + "name": "working class" + }, + { + "id": 12565, + "name": "psychological thriller" + }, + { + "id": 13126, + "name": "limousine driver" + }, + { + "id": 14514, + "name": "class differences" + }, + { + "id": 14864, + "name": "rich poor" + }, + { + "id": 17997, + "name": "housekeeper" + }, + { + "id": 18015, + "name": "tutor" + }, + { + "id": 18035, + "name": "family" + }, + { + "id": 33421, + "name": "crime family" + }, + { + "id": 173272, + "name": "flood" + }, + { + "id": 188861, + "name": "smell" + }, + { + "id": 198673, + "name": "unemployed" + }, + { + "id": 237462, + "name": "wealthy family" + } + ], + "production_companies": [ + { + "id": 7036, + "name": "CJ Entertainment", + "origin_country": "KR" + }, + { + "id": 4399, + "name": "Barunson E&A", + "origin_country": "KR" + } + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + { + "id": 161176, + "name": "space opera" + } + ], + "production_companies": [ + { + "id": 1, + "name": "Lucasfilm", + "origin_country": "US" + }, + { + "id": 11461, + "name": "Bad Robot", + "origin_country": "US" + }, + { + "id": 2, + "name": "Walt Disney Pictures", + "origin_country": "US" + }, + { + "id": 120404, + "name": "British Film Commission", + "origin_country": "" + } + ] + } +] +``` + + +--- + +## Simple search_json call +This query uses search_json to convert the keywords object array to a simple string array. The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + "moon", + "loss of loved one", + "planet mars", + "astronaut", + "moon colony", + "solar system", + "father son relationship", + "near future", + "planet neptune", + "space walk" + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + "mercenary", + "mumbai (bombay), india", + "based on comic", + "crime boss", + "rescue mission", + "based on graphic novel", + "dhaka (dacca), bangladesh" + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": [ + "school" + ] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + "nanotechnology", + "superhero", + "based on comic", + "psychotronic", + "shared universe", + "valiant comics" + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + "based on novel or book", + "gold rush", + "dog", + "sled dogs", + "yukon", + "19th century", + "cgi animation", + "1890s" + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + "video game", + "friendship", + "good vs evil", + "based on video game", + "road movie", + "farting", + "bar fight", + "amistad", + "live action remake", + "fantasy", + "videojuego" + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + "dc comics", + "based on comic", + "woman director", + "dc extended universe" + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + "dc comics" + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + "underground", + "seoul", + "birthday party", + "private lessons", + "basement", + "con artist", + "working class", + "psychological thriller", + "limousine driver", + "class differences", + "rich poor", + "housekeeper", + "tutor", + "family", + "crime family", + "flood", + "smell", + "unemployed", + "wealthy family" + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + "space opera" + ] + } +] +``` + + +--- + +## Use search_json in a where clause +This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, release_date FROM movies.movie where search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" +} +``` + +### Response: 200 +```json +[ + { + "title": "Iron Man", + "release_date": "2008-04-30" + }, + { + "title": "The Incredible Hulk", + "release_date": "2008-06-12" + }, + { + "title": "Iron Man 2", + "release_date": "2010-04-28" + }, + { + "title": "Thor", + "release_date": "2011-04-21" + }, + { + "title": "Captain America: The First Avenger", + "release_date": "2011-07-22" + }, + { + "title": "Marvel One-Shot: The Consultant", + "release_date": "2011-09-12" + }, + { + "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", + "release_date": "2011-10-25" + }, + { + "title": "The Avengers", + "release_date": "2012-04-25" + }, + { + "title": "Marvel One-Shot: Item 47", + "release_date": "2012-09-13" + }, + { + "title": "Iron Man 3", + "release_date": "2013-04-18" + }, + { + "title": "Marvel One-Shot: Agent Carter", + "release_date": "2013-09-08" + }, + { + "title": "Thor: The Dark World", + "release_date": "2013-10-29" + }, + { + "title": "Marvel One-Shot: All Hail the King", + "release_date": "2014-02-04" + }, + { + "title": "Marvel Studios: Assembling a Universe", + "release_date": "2014-03-18" + }, + { + "title": "Captain America: The Winter Soldier", + "release_date": "2014-03-20" + }, + { + "title": "Guardians of the Galaxy", + "release_date": "2014-07-30" + }, + { + "title": "Avengers: Age of Ultron", + "release_date": "2015-04-22" + }, + { + "title": "Ant-Man", + "release_date": "2015-07-14" + }, + { + "title": "Captain America: Civil War", + "release_date": "2016-04-27" + }, + { + "title": "Team Thor", + "release_date": "2016-08-28" + }, + { + "title": "Doctor Strange", + "release_date": "2016-10-25" + }, + { + "title": "Guardians of the Galaxy Vol. 2", + "release_date": "2017-04-19" + }, + { + "title": "Spider-Man: Homecoming", + "release_date": "2017-07-05" + }, + { + "title": "Thor: Ragnarok", + "release_date": "2017-10-25" + }, + { + "title": "Black Panther", + "release_date": "2018-02-13" + }, + { + "title": "Avengers: Infinity War", + "release_date": "2018-04-25" + }, + { + "title": "Ant-Man and the Wasp", + "release_date": "2018-07-04" + }, + { + "title": "Captain Marvel", + "release_date": "2019-03-06" + }, + { + "title": "Avengers: Endgame", + "release_date": "2019-04-24" + }, + { + "title": "Spider-Man: Far from Home", + "release_date": "2019-06-28" + }, + { + "title": "Black Widow", + "release_date": "2020-10-28" + }, + { + "title": "Untitled Spider-Man 3", + "release_date": "2021-11-04" + }, + { + "title": "Thor: Love and Thunder", + "release_date": "2022-02-10" + }, + { + "title": "Doctor Strange in the Multiverse of Madness", + "release_date": "2022-03-23" + }, + { + "title": "Untitled Marvel Project (3)", + "release_date": "2022-07-29" + }, + { + "title": "Guardians of the Galaxy Vol. 3", + "release_date": "2023-02-16" + } +] +``` + + +--- + +## Use search_json to show the movies with the largest casts +This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the results. The cast attribute holds an object array of details around the cast of a movie. We use the expression '$count(id)' that counts each id and returns the value back which we alias in SQL as cast_size which in turn gets used to sort the rows. 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10" +} +``` + +### Response: 200 +```json +[ + { + "movie_title": "Around the World in Eighty Days", + "cast_size": 312 + }, + { + "movie_title": "And the Oscar Goes To...", + "cast_size": 259 + }, + { + "movie_title": "Rock of Ages", + "cast_size": 223 + }, + { + "movie_title": "Mr. Smith Goes to Washington", + "cast_size": 213 + }, + { + "movie_title": "Les Misérables", + "cast_size": 208 + }, + { + "movie_title": "Jason Bourne", + "cast_size": 201 + }, + { + "movie_title": "The Muppets", + "cast_size": 191 + }, + { + "movie_title": "You Don't Mess with the Zohan", + "cast_size": 183 + }, + { + "movie_title": "The Irishman", + "cast_size": 173 + }, + { + "movie_title": "Spider-Man: Far from Home", + "cast_size": 173 + } +] +``` + + +--- + +## search_json as a condition, in a select with a table join +This example shows how we can use SEARCH_JSON to find movies where at least of 2 our favorite actors from Marvel films have acted together then list the movie, its overview, release date, and the actors names and their characters. The WHERE clause performs a count on credits.cast attribute that have the matching actors. The SELECT performs the same filter on the cast attribute and performs a transform on each object to just return the actor's name and their character. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2" +} +``` + +### Response: 200 +```json +[ + { + "title": "Out of Sight", + "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.", + "release_date": "1998-06-26", + "characters": [ + { + "actor": "Don Cheadle", + "character": "Maurice Miller" + }, + { + "actor": "Samuel L. Jackson", + "character": "Hejira Henry (uncredited)" + } + ] + }, + { + "title": "Iron Man", + "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.", + "release_date": "2008-04-30", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Captain America: The First Avenger", + "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", + "release_date": "2011-07-22", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "In Good Company", + "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", + "release_date": "2004-12-29", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Alex Foreman" + }, + { + "actor": "Clark Gregg", + "character": "Mark Steckle" + } + ] + }, + { + "title": "Zodiac", + "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", + "release_date": "2007-03-02", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Dave Toschi" + }, + { + "actor": "Robert Downey Jr.", + "character": "Paul Avery" + } + ] + }, + { + "title": "Hard Eight", + "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", + "release_date": "1996-02-28", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Clementine" + }, + { + "actor": "Samuel L. Jackson", + "character": "Jimmy" + } + ] + }, + { + "title": "The Spirit", + "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", + "release_date": "2008-12-25", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Silken Floss" + }, + { + "actor": "Samuel L. Jackson", + "character": "Octopuss" + } + ] + }, + { + "title": "S.W.A.T.", + "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", + "release_date": "2003-08-08", + "characters": [ + { + "actor": "Samuel L. Jackson", + "character": "Sgt. 
Dan 'Hondo' Harrelson" + }, + { + "actor": "Jeremy Renner", + "character": "Brian Gamble" + } + ] + }, + { + "title": "Iron Man 2", + "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", + "release_date": "2010-04-28", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Scarlett Johansson", + "character": "Natalie Rushman / Natasha Romanoff / Black Widow" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + } + ] + }, + { + "title": "Thor", + "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", + "release_date": "2011-04-21", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + } + ] + }, + { + "title": "View from the Top", + "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", + "release_date": "2003-03-21", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Donna" + }, + { + "actor": "Mark Ruffalo", + "character": "Ted Stewart" + } + ] + }, + { + "title": "The Nanny Diaries", + "overview": "A college graduate goes to work as a nanny for a rich New York family. Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", + "release_date": "2007-08-24", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Annie Braddock" + }, + { + "actor": "Chris Evans", + "character": "Hayden \"Harvard Hottie\"" + } + ] + }, + { + "title": "The Perfect Score", + "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", + "release_date": "2004-01-30", + "characters": [ + { + "actor": "Chris Evans", + "character": "Kyle" + }, + { + "actor": "Scarlett Johansson", + "character": "Francesca Curtis" + } + ] + }, + { + "title": "The Avengers", + "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. 
Spanning the globe, a daring recruitment effort begins!", + "release_date": "2012-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + } + ] + }, + { + "title": "Iron Man 3", + "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", + "release_date": "2013-04-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / Iron Patriot" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner (uncredited)" + } + ] + }, + { + "title": "Marvel One-Shot: The Consultant", + "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", + "release_date": "2011-09-12", + "characters": [ + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark (archive footage)" + } + ] + }, + { + "title": "Thor: The Dark World", + "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", + "release_date": "2013-10-29", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Loki as Captain America (uncredited)" + } + ] + }, + { + "title": "Avengers: Age of Ultron", + "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. 
As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", + "release_date": "2015-04-22", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + } + ] + }, + { + "title": "Captain America: The Winter Soldier", + "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", + "release_date": "2014-03-20", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + } + ] + }, + { + "title": "Thanks for Sharing", + "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", + "release_date": "2013-09-19", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Adam" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Phoebe" + } + ] + }, + { + "title": "Chef", + "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. 
Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", + "release_date": "2014-05-08", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Molly" + }, + { + "actor": "Robert Downey Jr.", + "character": "Marvin" + } + ] + }, + { + "title": "Marvel Studios: Assembling a Universe", + "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", + "release_date": "2014-03-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Himself / Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Himself / Thor" + }, + { + "actor": "Chris Evans", + "character": "Himself / Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Himself / Bruce Banner / Hulk" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Herself" + }, + { + "actor": "Clark Gregg", + "character": "Himself" + }, + { + "actor": "Samuel L. Jackson", + "character": "Himself" + }, + { + "actor": "Scarlett Johansson", + "character": "Herself" + }, + { + "actor": "Jeremy Renner", + "character": "Himself" + } + ] + }, + { + "title": "Captain America: Civil War", + "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", + "release_date": "2016-04-27", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + } + ] + }, + { + "title": "Thor: Ragnarok", + "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", + "release_date": "2017-10-25", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" + } + ] + }, + { + "title": "Avengers: Endgame", + "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. 
With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", + "release_date": "2019-04-24", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Don Cheadle", + "character": "James Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Pepper Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Avengers: Infinity War", + "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", + "release_date": "2018-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } + ] + }, + { + "title": "Captain Marvel", + "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", + "release_date": "2019-03-06", + "characters": [ + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Agent Phil Coulson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America (uncredited)" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (uncredited)" + }, + { + "actor": "Don Cheadle", + "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk (uncredited)" + } + ] + }, + { + "title": "Spider-Man: Homecoming", + "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", + "release_date": "2017-07-05", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + } + ] + }, + { + "title": "Team Thor", + "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", + "release_date": "2016-08-28", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner" + } + ] + }, + { + "title": "Black Widow", + "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", + "release_date": "2020-10-28", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + } + ] + } +] +``` diff --git a/site/versioned_docs/version-4.5/developers/operations-api/bulk-operations.md b/site/versioned_docs/version-4.5/developers/operations-api/bulk-operations.md new file mode 100644 index 00000000..836087d3 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/bulk-operations.md @@ -0,0 +1,136 @@ +--- +title: Bulk Operations +--- + +# Bulk Operations + +## CSV Data Load +Ingests CSV data, provided directly in the operation as an `insert`, `update` or `upsert` into the specified database table. + +* operation _(required)_ - must always be `csv_data_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. 
The default is `data` +* table _(required)_ - name of the table where you are loading your data +* data _(required)_ - csv data to import into Harper + +### Body +```json +{ + "operation": "csv_data_load", + "database": "dev", + "action": "insert", + "table": "breed", + "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" +} +``` + +### Response: 200 +```json + { + "message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69", + "job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69" + } +``` + +--- + +## CSV File Load +Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table. + +_Note: The CSV file must reside on the same machine on which Harper is running. For example, the path to a CSV on your computer will produce an error if your Harper instance is a cloud instance._ + +* operation _(required)_ - must always be `csv_file_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. The default is `data` +* table _(required)_ - name of the table where you are loading your data +* file_path _(required)_ - path to the csv file on the host running Harper + +### Body +```json +{ + "operation": "csv_file_load", + "action": "insert", + "database": "dev", + "table": "breed", + "file_path": "/home/user/imports/breeds.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e", + "job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e" +} +``` + +--- + +## CSV URL Load +Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into the specified database table. + +* operation _(required)_ - must always be `csv_url_load` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. The default is `data` +* table _(required)_ - name of the table where you are loading your data +* csv_url _(required)_ - URL to the csv + +### Body +```json +{ + "operation": "csv_url_load", + "action": "insert", + "database": "dev", + "table": "breed", + "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a", + "job_id": "332aa0a2-6833-46cd-88a6-ae375920436a" +} +``` + +---
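+ +All of the bulk load operations here run asynchronously and respond with a `job_id` rather than the final result. A common pattern is to poll the jobs API until the job finishes; here is a minimal sketch, assuming a local instance listening on port 9925 with basic auth and the standard `get_job` operation (endpoint, credentials, and status handling are placeholders to adjust for your setup): + +```javascript +// Minimal sketch: start a CSV URL load, then poll get_job until it finishes. +const ENDPOINT = 'http://localhost:9925'; // assumed local operations API +const AUTH = 'Basic ' + Buffer.from('admin:password').toString('base64'); + +async function operation(body) { + const res = await fetch(ENDPOINT, { + method: 'POST', + headers: { 'Content-Type': 'application/json', Authorization: AUTH }, + body: JSON.stringify(body) + }); + return res.json(); +} + +async function loadCsvAndWait(csvUrl) { + // Kick off the asynchronous load job + const { job_id } = await operation({ + operation: 'csv_url_load', + database: 'dev', + table: 'breed', + csv_url: csvUrl + }); + // Poll until the job reaches a terminal status + for (;;) { + const [job] = await operation({ operation: 'get_job', id: job_id }); + if (job.status === 'COMPLETE' || job.status === 'ERROR') return job; + await new Promise((resolve) => setTimeout(resolve, 1000)); + } +} +```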
+ +## Import from S3 +This operation allows users to import CSV or JSON files from an AWS S3 bucket as an `insert`, `update` or `upsert`. + +* operation _(required)_ - must always be `import_from_s3` +* action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +* database _(optional)_ - name of the database where you are loading your data. The default is `data` +* table _(required)_ - name of the table where you are loading your data +* s3 _(required)_ - object containing required AWS S3 bucket info for operation: + * aws_access_key_id - AWS access key for authenticating into your S3 bucket + * aws_secret_access_key - AWS secret for authenticating into your S3 bucket + * bucket - AWS S3 bucket to import from + * key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_ + * region - the region of the bucket + +### Body +```json +{ + "operation": "import_from_s3", + "action": "insert", + "database": "dev", + "table": "dog", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + } +} +``` + +### Response: 200 +```json +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16", + "job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/developers/operations-api/clustering-nats.md b/site/versioned_docs/version-4.5/developers/operations-api/clustering-nats.md new file mode 100644 index 00000000..a1157bea --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/clustering-nats.md @@ -0,0 +1,457 @@ +--- +title: Clustering using NATS +--- + +# Clustering using NATS + +## Cluster Set Routes +Adds a route/routes to either the hub or leaf server cluster configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_set_routes` +* server _(required)_ - must always be `hub` or `leaf`, in most cases you should use `hub` here +* routes _(required)_ - must always be an array of objects with a host and port: + * host - the host of the remote instance you are clustering to + * port - the clustering port of the remote instance you are clustering to, in most cases this is the value in `clustering.hubServer.cluster.network.port` in the remote instance's `harperdb-config.yaml` + +### Body +```json +{ + "operation": "cluster_set_routes", + "server": "hub", + "routes": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "cluster routes successfully set", + "set": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] +} +``` + +--- + +## Cluster Get Routes +Gets all the hub and leaf server routes from the config file. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_get_routes` + +### Body +```json +{ + "operation": "cluster_get_routes" +} +``` + +### Response: 200 +```json +{ + "hub": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "leaf": [] +} +``` + +---
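+ +Because `cluster_set_routes` behaves as a PATCH/upsert, it can be called repeatedly without clobbering routes that were set earlier, and `cluster_get_routes` is a convenient way to confirm the merged result. A minimal sketch of that round trip, using the same local-instance fetch pattern as the bulk-operations example (endpoint and credentials are placeholders): + +```javascript +// Sketch: routes set in separate calls are merged, not replaced. +// (Top-level await requires running this as an ES module.) +async function operation(body) { + const res = await fetch('http://localhost:9925', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: 'Basic ' + Buffer.from('admin:password').toString('base64') + }, + body: JSON.stringify(body) + }); + return res.json(); +} + +await operation({ operation: 'cluster_set_routes', server: 'hub', routes: [{ host: '3.22.181.22', port: 12345 }] }); +await operation({ operation: 'cluster_set_routes', server: 'hub', routes: [{ host: '18.116.24.71', port: 12345 }] }); + +// Both routes should now be present, since each set call upserts. +console.log(await operation({ operation: 'cluster_get_routes' })); +```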
+ +## Cluster Delete Routes +Removes route(s) from hub and/or leaf server routes array in the config file. Returns a deletion success message and arrays of deleted and skipped records. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_delete_routes` +* routes _(required)_ - must be an array of route object(s) + +### Body + +```json +{ + "operation": "cluster_delete_routes", + "routes": [ + { + "host": "18.116.24.71", + "port": 12345 + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "cluster routes successfully deleted", + "deleted": [ + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] +} +``` + + +--- + +## Add Node +Registers an additional Harper instance with associated subscriptions. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_node` +* node_name _(required)_ - the node name of the remote node +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`: + * schema - the schema to replicate from + * table - the table to replicate from + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + * start_time _(optional)_ - How far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format + +### Body +```json +{ + "operation": "add_node", + "node_name": "ec2-3-22-181-22", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": false, + "publish": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Successfully added 'ec2-3-22-181-22' to manifest" +} +``` + +--- + +## Update Node +Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `update_node` +* node_name _(required)_ - the node name of the remote node you are updating +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`: + * schema - the schema to replicate from + * table - the table to replicate from + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + * start_time _(optional)_ - How far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format + +### Body +```json +{ + "operation": "update_node", + "node_name": "ec2-18-223-239-195", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": false, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Successfully updated 'ec2-3-22-181-22'" +} +``` + +--- + +## Set Node Replication +A more adeptly named alias for add and update node.
This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. The `database` (aka `schema`) parameter is optional; it will default to `data`. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `set_node_replication` +* node_name _(required)_ - the node name of the remote node you are updating +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `table`, `subscribe` and `publish`: + * database *(optional)* - the database to replicate from + * table *(required)* - the table to replicate from + * subscribe *(required)* - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish *(required)* - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body +```json +{ + "operation": "set_node_replication", + "node_name": "node1", + "subscriptions": [ + { + "table": "dog", + "subscribe": true, + "publish": true + } + ] +} +``` +### Response: 200 +```json +{ + "message": "Successfully updated 'node1'" +} +``` + +--- + +## Cluster Status +Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_status` + +### Body +```json +{ + "operation": "cluster_status" +} +``` + +### Response: 200 +```json +{ + "node_name": "ec2-18-221-143-69", + "is_enabled": true, + "connections": [ + { + "node_name": "ec2-3-22-181-22", + "status": "open", + "ports": { + "clustering": 12345, + "operations_api": 9925 + }, + "latency_ms": 13, + "uptime": "30d 1h 18m 8s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ] + } + ] +} +``` + + +--- + +## Cluster Network +Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with and the routes set in its config file. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `cluster_network` +* timeout (_optional_) - the amount of time in milliseconds to wait for a response from the network. Must be a number +* connected_nodes (_optional_) - omit `connected_nodes` from the response. Must be a boolean. Defaults to `false` +* routes (_optional_) - omit `routes` from the response. Must be a boolean. Defaults to `false` + +### Body + +```json +{ + "operation": "cluster_network" +} +``` + +### Response: 200 +```json +{ + "nodes": [ + { + "name": "local_node", + "response_time": 4, + "connected_nodes": ["ec2-3-142-255-78"], + "routes": [ + { + "host": "3.142.255.78", + "port": 9932 + } + ] + }, + { + "name": "ec2-3-142-255-78", + "response_time": 57, + "connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"], + "routes": [] + } + ] +} +``` + +--- + +## Remove Node +Removes a Harper instance and associated subscriptions from the cluster. Learn more about [Harper clustering here](../clustering/).
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `remove_node` +* node_name _(required)_ - the name of the node you are de-registering + +### Body +```json +{ + "operation": "remove_node", + "node_name": "ec2-3-22-181-22" +} +``` + +### Response: 200 +```json +{ + "message": "Successfully removed 'ec2-3-22-181-22' from manifest" +} +``` + +--- + +## Configure Cluster +Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup. +Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `configure_cluster` +* connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node + +### Body +```json +{ + "operation": "configure_cluster", + "connections": [ + { + "node_name": "ec2-3-137-184-8", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": false + } + ] + }, + { + "node_name": "ec2-18-223-239-195", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": true + } + ] + } + ] +} +``` + +### Response: 200 +```json +{ + "message": "Cluster successfully configured." +} +``` + +--- + +## Purge Stream + +Purges messages from a stream. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `purge_stream` +* database _(required)_ - the name of the database where the streams table resides +* table _(required)_ - the name of the table that belongs to the stream +* options _(optional)_ - control how many messages get purged. Options are: + * `keep` - purge will keep this many most recent messages + * `seq` - purge all messages up to, but not including, this sequence + +### Body +```json +{ + "operation": "purge_stream", + "database": "dev", + "table": "dog", + "options": { + "keep": 100 + } +} +```
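+ +The two `options` differ in how the cutoff is expressed: `keep` is relative (retain only the N most recent messages), while `seq` is absolute (purge everything below a given sequence number). As a sketch, here are the two request shapes side by side (the sequence number is an arbitrary illustration): + +```javascript +// keep: retain only the 100 most recent messages in the stream +const purgeByKeep = { + operation: 'purge_stream', + database: 'dev', + table: 'dog', + options: { keep: 100 } +}; + +// seq: purge all messages up to, but not including, sequence 250 +const purgeBySeq = { + operation: 'purge_stream', + database: 'dev', + table: 'dog', + options: { seq: 250 } +}; +```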
+ +--- \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/developers/operations-api/clustering.md b/site/versioned_docs/version-4.5/developers/operations-api/clustering.md new file mode 100644 index 00000000..05963edc --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/clustering.md @@ -0,0 +1,355 @@ +--- +title: Clustering +--- + +# Clustering + +The following operations are available for configuring and managing [Harper replication](../replication/).\ + + +_**If you are using NATS for clustering, please see the**_ [_**NATS Clustering Operations**_](./clustering-nats) _**documentation.**_ + +## Add Node + +Adds a new Harper instance to the cluster. If `subscriptions` are provided, it will also create the replication relationships between the nodes. If they are not provided, a fully replicating system will be created. [Learn more about adding nodes here](../replication/). + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `add_node` +* hostname or url _(required)_ - one of these fields is required. You must provide either the `hostname` or the `url` of the node you want to add +* verify\_tls _(optional)_ - a boolean which determines if the TLS certificate should be verified. Setting it to `false` will allow the Harper default self-signed certificates to be accepted. Defaults to `true` +* authorization _(optional)_ - an object or a string which contains the authorization information for the node being added. If it is an object, it should contain `username` and `password` fields. If it is a string, it should use HTTP `Authorization` style credentials +* retain_authorization _(optional)_ - a boolean which determines if the authorization credentials should be retained/stored and used every time a connection is made to this node. If `true`, the authorization will be stored on the node record. Generally this should not be used, as mTLS/certificate based authorization is much more secure and safe, and avoids the need for storing credentials. Defaults to `false`. +* revoked_certificates _(optional)_ - an array of revoked certificate serial numbers. If a certificate is revoked, it will not be accepted for any connections. +* shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding. +* subscriptions _(optional)_ - The relationship created between nodes. If not provided, a fully replicated cluster will be set up. Must be an object array and include `database`, `table`, `subscribe` and `publish`: + * database - the database to replicate + * table - the table to replicate + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body + +```json +{ + "operation": "add_node", + "hostname": "server-two", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password" + } +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully added 'server-two' to cluster" +} +``` + +*** + +## Update Node + +Modifies an existing Harper instance in the cluster. + +_Operation is restricted to super\_user roles only_ + +_Note: will attempt to add the node if it does not exist_ + +* operation _(required)_ - must always be `update_node` +* hostname _(required)_ - the `hostname` of the remote node you are updating +* revoked_certificates _(optional)_ - an array of revoked certificate serial numbers. If a certificate is revoked, it will not be accepted for any connections. +* shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding. +* subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `database`, `table`, `subscribe` and `publish`: + * database - the database to replicate from + * table - the table to replicate from + * subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + * publish - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body + +```json +{ + "operation": "update_node", + "hostname": "server-two", + "subscriptions": [ + { + "database": "dev", + "table": "my-table", + "subscribe": true, + "publish": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully updated 'server-two'" +} +``` + +*** + +## Remove Node + +Removes a Harper node from the cluster and stops replication. [Learn more about remove node here](../replication/).
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `remove_node` +* hostname _(required)_ - the hostname of the node you are removing + +### Body + +```json +{ + "operation": "remove_node", + "hostname": "server-two" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully removed 'server-two' from cluster" +} +``` + +*** + +## Cluster Status + +Returns an array of status objects from a cluster. + +`database_sockets` shows the actual websocket connections that exist between nodes. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_status` + +### Body + +```json +{ + "operation": "cluster_status" +} +``` + +### Response: 200 + +```json +{ + "type": "cluster-status", + "connections": [ + { + "replicateByDefault": true, + "replicates": true, + "url": "wss://server-2.domain.com:9933", + "name": "server-2.domain.com", + "subscriptions": null, + "database_sockets": [ + { + "database": "data", + "connected": true, + "latency": 0.70, + "thread_id": 1, + "nodes": [ + "server-2.domain.com" + ], + "lastCommitConfirmed": "Wed, 12 Feb 2025 19:09:34 GMT", + "lastReceivedRemoteTime": "Wed, 12 Feb 2025 16:49:29 GMT", + "lastReceivedLocalTime": "Wed, 12 Feb 2025 16:50:59 GMT", + "lastSendTime": "Wed, 12 Feb 2025 16:50:59 GMT" + } + ] + } + ], + "node_name": "server-1.domain.com", + "is_enabled": true +} +``` +There is a separate socket for each database for each node. Each node is represented in the connections array, and each database connection to that node is represented in the `database_sockets` array. Additional timing statistics include: +* `lastCommitConfirmed`: When a commit is sent out, it should receive a confirmation from the remote server; this is the last receipt of confirmation of an outgoing commit. +* `lastReceivedRemoteTime`: This is the timestamp of the transaction that was last received. The timestamp is from when the original transaction occurred. +* `lastReceivedLocalTime`: This is the local time when the last transaction was received. If there is a difference between this and `lastReceivedRemoteTime`, it means there is a delay from the original transaction to receiving it, and so the node is probably catching up/behind. +* `sendingMessage`: The timestamp of the transaction that is actively being sent. This won't exist if the replicator is waiting for the next transaction to send.
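+ +Because `lastReceivedRemoteTime` and `lastReceivedLocalTime` are reported per socket, the gap between them is a rough indicator of replication delay. A small sketch of reading a `cluster_status` response this way (field names as shown above; what counts as "too far behind" is up to you): + +```javascript +// Rough sketch: derive per-socket replication delay from a cluster_status response. +function replicationDelays(status) { + const delays = []; + for (const connection of status.connections) { + for (const socket of connection.database_sockets || []) { + const remote = Date.parse(socket.lastReceivedRemoteTime); + const local = Date.parse(socket.lastReceivedLocalTime); + if (!Number.isNaN(remote) && !Number.isNaN(local)) { + // A large positive gap suggests this socket is catching up. + delays.push({ node: connection.name, database: socket.database, delayMs: local - remote }); + } + } + } + return delays; +} +```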
+ +*** + +## Configure Cluster + +Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `configure_cluster` +* connections _(required)_ - must be an object array with each object following the `add_node` schema. + +### Body + +```json +{ + "operation": "configure_cluster", + "connections": [ + { + "hostname": "server-two", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password2" + }, + "subscriptions": [ + { + "schema": "dev", + "table": "my-table", + "subscribe": true, + "publish": false + } + ] + }, + { + "hostname": "server-three", + "verify_tls": false, + "authorization": { + "username": "admin", + "password": "password3" + }, + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": true + } + ] + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Cluster successfully configured." +} +``` + +*** + +## Cluster Set Routes + +Adds a route/routes to the `replication.routes` configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_set_routes` +* routes _(required)_ - the routes field is an array that specifies the routes for clustering. Each element in the array can be either a string or an object with `hostname` and `port` properties. + +### Body + +```json +{ + "operation": "cluster_set_routes", + "routes": [ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "cluster routes successfully set", + "set": [ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } + ], + "skipped": [] +} +``` + +*** + +## Cluster Get Routes + +Gets the replication routes from the Harper config file. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_get_routes` + +### Body + +```json +{ + "operation": "cluster_get_routes" +} +``` + +### Response: 200 + +```json +[ + "wss://server-two:9925", + { + "hostname": "server-three", + "port": 9930 + } +] +``` + +*** + +## Cluster Delete Routes + +Removes route(s) from the Harper config file. Returns a deletion success message and arrays of deleted and skipped records. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `cluster_delete_routes` +* routes _(required)_ - must be an array of route object(s) + +### Body + +```json +{ + "operation": "cluster_delete_routes", + "routes": [ + { + "hostname": "server-three", + "port": 9930 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "cluster routes successfully deleted", + "deleted": [ + { + "hostname": "server-three", + "port": 9930 + } + ], + "skipped": [] +} +``` diff --git a/site/versioned_docs/version-4.5/developers/operations-api/components.md b/site/versioned_docs/version-4.5/developers/operations-api/components.md new file mode 100644 index 00000000..8d3c79b4 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/components.md @@ -0,0 +1,519 @@ +--- +title: Components +--- + +# Components + +## Add Component + +Creates a new component project in the component root directory using a predefined template. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `add_component` +* project _(required)_ - the name of the project you wish to create +* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. + +### Body + +```json +{ + "operation": "add_component", + "project": "my-component" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully added project: my-component" +} +``` + +*** + +## Deploy Component + +Will deploy a component using either a base64-encoded string representation of a `.tar` file (the output from `package_component`) or a package value, which can be any valid NPM reference, such as a GitHub repo, an NPM package, a tarball, a local directory or a website. + +If deploying with the `payload` option, Harper will decode the base64-encoded string, reconstitute the .tar file of your project folder, and extract it to the component root project directory.
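+ +The `payload` workflow pairs naturally with `package_component` (described below): package a project on one instance, then hand the resulting base64 string to `deploy_component` on another. A minimal sketch, assuming the operations API on port 9925 for both instances, with placeholder hostnames and credentials: + +```javascript +// Sketch: copy a component between instances via package_component / deploy_component. +// (Top-level await requires running this as an ES module.) +async function operationOn(endpoint, body) { + const res = await fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: 'Basic ' + Buffer.from('admin:password').toString('base64') + }, + body: JSON.stringify(body) + }); + return res.json(); +} + +const source = 'http://source-instance:9925'; // placeholder +const target = 'http://target-instance:9925'; // placeholder + +// 1. Package the project into a base64-encoded tarball on the source instance. +const { project, payload } = await operationOn(source, { + operation: 'package_component', + project: 'my-component', + skip_node_modules: true +}); + +// 2. Deploy that payload on the target instance (a restart may then be required). +await operationOn(target, { operation: 'deploy_component', project, payload }); +```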
+ +If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`. Then npm install will be utilized to install the component in the `node_modules` directory located in the hdb root. The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM registered packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers. Here is an example URL package reference to a (public) Git repository that doesn't require authentication: + +``` +https://github.com/HarperDB/application-template#semver:v1.0.0 +``` + +or this can be shortened to: + +``` +HarperDB/application-template#semver:v1.0.0 +``` + +You can also install from a private repository if you have SSH keys installed on the server: + +``` +git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0 +``` + +Or you can use a GitHub token: + +``` +https://<token>@github.com/my-org/my-app#semver:v1.0.0 +``` + +Or you can use a GitLab Project Access Token: + +``` +https://my-project:<project-access-token>@gitlab.com/my-group/my-project#semver:v1.0.0 +``` + +Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node\_modules in the hdb root directory (typically `~/hdb/node_modules`). + +_Note: After deploying a component a restart may be required_ + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `deploy_component` +* project _(required)_ - the name of the project you wish to deploy +* package _(optional)_ - this can be any valid GitHub or NPM reference +* payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string +* restart _(optional)_ - must be either a boolean or the string `rolling`. If set to `rolling`, a rolling restart will be triggered after the component is deployed, meaning that each node in the cluster will be sequentially restarted (waiting for the last restart to start the next). If set to `true`, the restart will not be rolling, all nodes will be restarted in parallel. If `replicated` is `true`, the restart operations will be replicated across the cluster. +* replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean. +* install\_command _(optional)_ - A command to use when installing the component. Must be a string.
This can be used to install dependencies with pnpm or yarn, for example, like: `"install_command": "npm install -g pnpm && pnpm install"` + +### Body + +```json +{ + "operation": "deploy_component", + "project": "my-component", + "payload": "A very large base64-encoded string representation of the .tar file" +} +``` + +```json +{ + "operation": "deploy_component", + "project": "my-component", + "package": "HarperDB/application-template", + "replicated": true +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully deployed: my-component" +} +``` + +*** + +## Package Component + +Creates a temporary `.tar` file of the specified project folder, then reads it into a base64-encoded string and returns an object with the project name and the payload. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `package_component` +* project _(required)_ - the name of the project you wish to package +* skip\_node\_modules _(optional)_ - if true, sets an option for the tar module that excludes the project's node\_modules directory. Must be a boolean + +### Body + +```json +{ + "operation": "package_component", + "project": "my-component", + "skip_node_modules": true +} +``` + +### Response: 200 + +```json +{ + "project": "my-component", + "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" +} +``` + +*** + +## Drop Component + +Deletes a file from inside the component project or deletes the complete project. + +**If just `project` is provided, it will delete all of that project's local files and folders** + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `drop_component` +* project _(required)_ - the name of the project you wish to delete or to delete from if using the `file` parameter +* file _(optional)_ - the path relative to your project folder of the file you wish to delete +* replicated _(optional)_ - if true, Harper will replicate the component deletion to all nodes in the cluster. Must be a boolean. +* restart _(optional)_ - if true, Harper will restart after dropping the component. Must be a boolean.
+ +### Body + +```json +{ + "operation": "drop_component", + "project": "my-component", + "file": "utils/myUtils.js" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully dropped: my-component/utils/myUtils.js" +} +``` + +*** + +## Get Components + +Gets all local component files and folders and any component config from `harperdb-config.yaml` + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `get_components` + +### Body + +```json +{ + "operation": "get_components" +} +``` + +### Response: 200 + +```json +{ + "name": "components", + "entries": [ + { + "package": "HarperDB/application-template", + "name": "deploy-test-gh" + }, + { + "package": "@fastify/compress", + "name": "fast-compress" + }, + { + "name": "my-component", + "entries": [ + { + "name": "LICENSE", + "mtime": "2023-08-22T16:00:40.286Z", + "size": 1070 + }, + { + "name": "index.md", + "mtime": "2023-08-22T16:00:40.287Z", + "size": 1207 + }, + { + "name": "config.yaml", + "mtime": "2023-08-22T16:00:40.287Z", + "size": 1069 + }, + { + "name": "package.json", + "mtime": "2023-08-22T16:00:40.288Z", + "size": 145 + }, + { + "name": "resources.js", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 583 + }, + { + "name": "schema.graphql", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 466 + }, + { + "name": "utils", + "entries": [ + { + "name": "commonUtils.js", + "mtime": "2023-08-22T16:00:40.289Z", + "size": 583 + } + ] + } + ] + } + ] +} +``` + +*** + +## Get Component File + +Gets the contents of a file inside a component project. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `get_component_file` +* project _(required)_ - the name of the project where the file is located +* file _(required)_ - the path relative to your project folder of the file you wish to view +* encoding _(optional)_ - the encoding that will be passed to the read file call. Defaults to `utf8` + +### Body + +```json +{ + "operation": "get_component_file", + "project": "my-component", + "file": "resources.js" +} +``` + +### Response: 200 + +```json +{ + "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}" +} +``` + +*** + +## Set Component File + +Creates or updates a file inside a component project. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `set_component_file` +* project _(required)_ - the name of the project the file is located in +* file _(required)_ - the path relative to your project folder of the file you wish to set +* payload _(required)_ - what will be written to the file +* encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8` +* replicated _(optional)_ - if true, Harper will replicate the component update to all nodes in the cluster. Must be a boolean.
+ +### Body + +```json +{ + "operation": "set_component_file", + "project": "my-component", + "file": "test.js", + "payload": "console.log('hello world')" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully set component: test.js" +} +``` + +## Add SSH Key + +Adds an SSH key for deploying components from private repositories. This will also create an ssh config file that will be used when deploying the components. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `add_ssh_key` +* name _(required)_ - the name of the key +* key _(required)_ - the private key contents. Must be an ed25519 key. Line breaks must be delimited with `\n` and have a trailing `\n` +* host _(required)_ - the host for the ssh config (see below). Used as part of the `package` url when deploying a component using this key +* hostname _(required)_ - the hostname for the ssh config (see below). Used to map `host` to an actual domain (e.g. `github.com`) +* known\_hosts _(optional)_ - the public SSH keys of the host your component will be retrieved from. If `hostname` is `github.com` this will be retrieved automatically. Line breaks must be delimited with `\n` +* replicated _(optional)_ - if true, Harper will replicate the key to all nodes in the cluster. Must be a boolean. + +### Body + +```json +{ + "operation": "add_ssh_key", + "name": "harperdb-private-component", + "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nfake\nkey\n-----END OPENSSH PRIVATE KEY-----\n", + "host": "harperdb-private-component.github.com", + "hostname": "github.com" +} +``` + +### Response: 200 + +```json +{ + "message": "Added ssh key: harperdb-private-component" +} +``` + +### Generated Config and Deploy Component "package" string examples + +``` +#harperdb-private-component +Host harperdb-private-component.github.com + HostName github.com + User git + IdentityFile /hdbroot/ssh/harperdb-private-component.key + IdentitiesOnly yes +``` + +``` +"package": "git+ssh://git@<host>:<owner>/<repo>.git#semver:<tag>" + +"package": "git+ssh://git@harperdb-private-component.github.com:HarperDB/harperdb-private-component.git#semver:v1.2.3" +``` + +Note that `deploy_component` with a package uses `npm install`, so the url must be a valid npm-format url. The above is an example of a url using a tag in the repo to install. + +## Update SSH Key + +Updates the private key contents of an existing SSH key. + +_Operation is restricted to super\_user roles only_ + +- operation _(required)_ - must always be `update_ssh_key` +- name _(required)_ - the name of the key to be updated +- key _(required)_ - the private key contents. Must be an ed25519 key.
+## Update SSH Key
+
+Updates the private key contents of an existing SSH key.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `update_ssh_key`
+* name _(required)_ - the name of the key to be updated
+* key _(required)_ - the private key contents. Must be an ed25519 key. Line breaks must be delimited with `\n` and have a trailing `\n`
+* replicated _(optional)_ - if true, Harper will replicate the key update to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+	"operation": "update_ssh_key",
+	"name": "harperdb-private-component",
+	"key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nNEWFAKE\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
+	"host": "harperdb-private-component.github.com",
+	"hostname": "github.com"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Updated ssh key: harperdb-private-component"
+}
+```
+
+## Delete SSH Key
+
+Deletes an SSH key. This will also remove it from the generated SSH config.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `delete_ssh_key`
+* name _(required)_ - the name of the key to be deleted
+* replicated _(optional)_ - if true, Harper will replicate the key deletion to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+	"operation": "delete_ssh_key",
+	"name": "harperdb-private-component"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Deleted ssh key: harperdb-private-component"
+}
+```
+
+## List SSH Keys
+
+Lists the names of the added SSH keys.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `list_ssh_keys`
+
+### Body
+
+```json
+{
+	"operation": "list_ssh_keys"
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"name": "harperdb-private-component"
+	},
+	...
+]
+```
+
+## Set SSH Known Hosts
+
+Sets the SSH known\_hosts file. This will overwrite the file.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `set_ssh_known_hosts`
+* known\_hosts _(required)_ - the contents to set the known\_hosts to. Line breaks must be delimited with `\n`
+* replicated _(optional)_ - if true, Harper will replicate the known hosts to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+	"operation": "set_ssh_known_hosts",
+	"known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Known hosts successfully set"
+}
+```
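+
+For hosts other than `github.com` the `known_hosts` value must be supplied manually. A sketch of one way to produce it, assuming a Unix shell with `ssh-keyscan` and `awk` available:
+
+```bash
+# Hypothetical helper: collect a host's public keys and replace real line
+# breaks with literal \n sequences, as set_ssh_known_hosts expects.
+ssh-keyscan github.com 2>/dev/null | awk '{printf "%s\\n", $0}'
+```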
+## Get SSH Known Hosts
+
+Gets the contents of the known\_hosts file.
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `get_ssh_known_hosts`
+
+### Body
+
+```json
+{
+	"operation": "get_ssh_known_hosts"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/custom-functions.md b/site/versioned_docs/version-4.5/developers/operations-api/custom-functions.md
new file mode 100644
index 00000000..2c8e906f
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/custom-functions.md
@@ -0,0 +1,278 @@
+---
+title: Custom Functions
+---
+
+# Custom Functions
+
+*These operations are deprecated.*
+
+## Custom Functions Status
+
+Returns the state of the Custom Functions server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `custom_functions_status`
+
+### Body
+```json
+{
+	"operation": "custom_functions_status"
+}
+```
+
+### Response: 200
+```json
+{
+	"is_enabled": true,
+	"port": 9926,
+	"directory": "/Users/myuser/hdb/custom_functions"
+}
+```
+
+---
+
+## Get Custom Functions
+
+Returns an array of projects within the Custom Functions root project directory. Each project has details including each of the files in the routes and helpers directories, and the total file count in the static folder.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_custom_functions`
+
+### Body
+
+```json
+{
+	"operation": "get_custom_functions"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"dogs": {
+		"routes": ["examples"],
+		"helpers": ["example"],
+		"static": 3
+	}
+}
+```
+
+---
+
+## Get Custom Function
+
+Returns the content of the specified file as text. Harper Studio uses this call to render the file content in its built-in code editor.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_custom_function`
+* project _(required)_ - the name of the project containing the file for which you wish to get content
+* type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers
+* file _(required)_ - the name of the file for which you wish to get content - should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+	"operation": "get_custom_function",
+	"project": "dogs",
+	"type": "helpers",
+	"file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+---
+
+## Set Custom Function
+
+Updates the content of the specified file. Harper Studio uses this call to save any changes made through its built-in code editor.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_custom_function`
+* project _(required)_ - the name of the project containing the file for which you wish to set content
+* type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers
+* file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js)
+* function_content _(required)_ - the content you wish to save into the specified file
+
+### Body
+
+```json
+{
+	"operation": "set_custom_function",
+	"project": "dogs",
+	"type": "helpers",
+	"file": "example",
+	"function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n return new Promise((resolve, reject) => {\n const req = https.request(options, (res) => {\n res.setEncoding('utf8');\n let responseBody = '';\n\n res.on('data', (chunk) => {\n responseBody += chunk;\n });\n\n res.on('end', () => {\n resolve(JSON.parse(responseBody));\n });\n });\n\n req.on('error', (err) => {\n reject(err);\n });\n\n req.end();\n });\n};\n\nconst customValidation = async (request,logger) => {\n const options = {\n hostname: 'jsonplaceholder.typicode.com',\n port: 443,\n path: '/todos/1',\n method: 'GET',\n headers: { authorization: request.headers.authorization },\n };\n\n const result = await authRequest(options);\n\n /*\n * throw an authentication error based on the response body or statusCode\n */\n if (result.error) {\n const errorString = result.error || 'Sorry, there was an error authenticating your request';\n logger.error(errorString);\n throw new Error(errorString);\n }\n return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully updated custom function: example.js"
+}
+```
+
+---
+
+## Drop Custom Function
+
+Deletes the specified file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function`
+* project _(required)_ - the name of the project containing the file you wish to delete
+* type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers
+* file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+	"operation": "drop_custom_function",
+	"project": "dogs",
+	"type": "helpers",
+	"file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deleted custom function: example.js"
+}
+```
+
+---
+
+## Add Custom Function Project
+
+Creates a new project folder in the Custom Functions root project directory. It also copies into the new directory the contents of our Custom Functions Project template, which is publicly available at https://github.com/HarperDB/harperdb-custom-functions-template.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_custom_function_project`
+* project _(required)_ - the name of the project you wish to create
+
+### Body
+
+```json
+{
+	"operation": "add_custom_function_project",
+	"project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully created custom function project: dogs"
+}
+```
+
+---
+
+## Drop Custom Function Project
+
+Deletes the specified project folder and all of its contents.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `drop_custom_function_project`
+* project _(required)_ - the name of the project you wish to delete
+
+### Body
+
+```json
+{
+	"operation": "drop_custom_function_project",
+	"project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deleted project: dogs"
+}
+```
+
+---
+
+## Package Custom Function Project
+
+Creates a .tar file of the specified project folder, reads it into a base64-encoded string, and returns an object containing the project name, the payload string, and the path to the .tar file.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `package_custom_function_project`
+* project _(required)_ - the name of the project you wish to package up for deployment
+* skip_node_modules _(optional)_ - if true, the project's node_modules directory will be excluded from the .tar file. Must be a boolean.
+
+### Body
+
+```json
+{
+	"operation": "package_custom_function_project",
+	"project": "dogs",
+	"skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+	"project": "dogs",
+	"payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
+	"file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar"
+}
+```
+
+---
+
+## Deploy Custom Function Project
+
+Takes the output of package_custom_function_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the Custom Functions root project directory.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `deploy_custom_function_project`
+* project _(required)_ - the name of the project you wish to deploy. Must be a string
+* payload _(required)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+
+### Body
+
+```json
+{
+	"operation": "deploy_custom_function_project",
+	"project": "dogs",
+	"payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "Successfully deployed project: dogs"
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/databases-and-tables.md b/site/versioned_docs/version-4.5/developers/operations-api/databases-and-tables.md
new file mode 100644
index 00000000..27ec954e
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/databases-and-tables.md
@@ -0,0 +1,364 @@
+---
+title: Databases and Tables
+---
+
+# Databases and Tables
+
+## Describe All
+Returns the definitions of all databases and tables within the instance. Record counts above 5000 records are estimated, as determining the exact count can be expensive. When the record count is estimated, this is indicated by the inclusion of a confidence interval of `estimated_record_range`. If you need the exact count, you can include `"exact_count": true` in the operation, but be aware that this requires a full table scan (which may be expensive).
+
+* operation _(required)_ - must always be `describe_all`
+
+### Body
+```json
+{
+	"operation": "describe_all"
+}
+```
+
+### Response: 200
+```json
+{
+	"dev": {
+		"dog": {
+			"schema": "dev",
+			"name": "dog",
+			"hash_attribute": "id",
+			"audit": true,
+			"schema_defined": false,
+			"attributes": [
+				{
+					"attribute": "id",
+					"indexed": true,
+					"is_primary_key": true
+				},
+				{
+					"attribute": "__createdtime__",
+					"indexed": true
+				},
+				{
+					"attribute": "__updatedtime__",
+					"indexed": true
+				},
+				{
+					"attribute": "type",
+					"indexed": true
+				}
+			],
+			"clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+			"record_count": 4000,
+			"estimated_record_range": [3976, 4033],
+			"last_updated_record": 1697658683698.4504
+		}
+	}
+}
+```
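+
+For example, a minimal sketch of the same request forcing an exact count, assuming the default operations port (note this performs a full table scan):
+
+```bash
+# Hypothetical example: request exact record counts instead of estimates.
+curl -s -X POST 'http://localhost:9925' \
+  -H 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "describe_all", "exact_count": true}'
+```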
+
+---
+
+## Describe Database
+Returns the definitions of all tables within the specified database.
+
+* operation _(required)_ - must always be `describe_database`
+* database _(optional)_ - database you wish to describe. The default is `data`
+
+### Body
+```json
+{
+	"operation": "describe_database",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+	"dog": {
+		"schema": "dev",
+		"name": "dog",
+		"hash_attribute": "id",
+		"audit": true,
+		"schema_defined": false,
+		"attributes": [
+			{
+				"attribute": "id",
+				"indexed": true,
+				"is_primary_key": true
+			},
+			{
+				"attribute": "__createdtime__",
+				"indexed": true
+			},
+			{
+				"attribute": "__updatedtime__",
+				"indexed": true
+			},
+			{
+				"attribute": "type",
+				"indexed": true
+			}
+		],
+		"clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+		"record_count": 4000,
+		"estimated_record_range": [3976, 4033],
+		"last_updated_record": 1697658683698.4504
+	}
+}
+```
+
+---
+
+## Describe Table
+Returns the definition of the specified table.
+
+* operation _(required)_ - must always be `describe_table`
+* table _(required)_ - table you wish to describe
+* database _(optional)_ - database where the table you wish to describe lives. The default is `data`
+
+### Body
+```json
+{
+	"operation": "describe_table",
+	"table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+	"schema": "dev",
+	"name": "dog",
+	"hash_attribute": "id",
+	"audit": true,
+	"schema_defined": false,
+	"attributes": [
+		{
+			"attribute": "id",
+			"indexed": true,
+			"is_primary_key": true
+		},
+		{
+			"attribute": "__createdtime__",
+			"indexed": true
+		},
+		{
+			"attribute": "__updatedtime__",
+			"indexed": true
+		},
+		{
+			"attribute": "type",
+			"indexed": true
+		}
+	],
+	"clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+	"record_count": 4000,
+	"estimated_record_range": [3976, 4033],
+	"last_updated_record": 1697658683698.4504
+}
+```
+
+---
+
+## Create Database
+Create a new database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `create_database`
+* database _(optional)_ - name of the database you are creating. The default is `data`
+
+### Body
+```json
+{
+	"operation": "create_database",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "database 'dev' successfully created"
+}
+```
+
+---
+
+## Drop Database
+Drop an existing database. NOTE: Dropping a database will delete all tables and all of their records in that database.
+
+_Operation is restricted to super_user roles only_

+* operation _(required)_ - this should always be `drop_database`
+* database _(required)_ - name of the database you are dropping
+* replicated _(optional)_ - if true, Harper will replicate the drop to all nodes in the cluster. Must be a boolean.
+
+### Body
+```json
+{
+	"operation": "drop_database",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "successfully deleted 'dev'"
+}
+```
+
+---
+
+## Create Table
+Create a new table within a database.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `create_table`
+* database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided it will default to `data`.
+* table _(required)_ - name of the table you are creating
+* primary_key _(required)_ - primary key for the table
+* attributes _(optional)_ - an array of attributes that specifies the schema for the table. When attributes are supplied, the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted. Each attribute is specified as:
+  * name _(required)_ - the name of the attribute
+  * indexed _(optional)_ - indicates if the attribute should be indexed
+  * type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any)
+* expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted, in seconds (records are not evicted on any timer if not specified)
+
+### Body
+```json
+{
+	"operation": "create_table",
+	"database": "dev",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "table 'dev.dog' successfully created."
+}
+```
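+
+As a sketch of the schema-defined form described above (the attribute names are illustrative, and `expiration` is in seconds, so 86400 evicts records after one day):
+
+```bash
+# Hypothetical example: create a schema-defined table with typed attributes
+# and a one-day record expiration; new attributes will not be auto-added.
+curl -s -X POST 'http://localhost:9925' \
+  -H 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "operation": "create_table",
+    "database": "dev",
+    "table": "dog",
+    "primary_key": "id",
+    "attributes": [
+      { "name": "dog_name", "type": "String", "indexed": true },
+      { "name": "age", "type": "Int" }
+    ],
+    "expiration": 86400
+  }'
+```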
+
+---
+
+## Drop Table
+Drop an existing database table. NOTE: Dropping a table will delete all associated records in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_table`
+* database _(optional)_ - database where the table you are dropping lives. The default is `data`
+* table _(required)_ - name of the table you are dropping
+* replicated _(optional)_ - if true, Harper will replicate the drop to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+	"operation": "drop_table",
+	"database": "dev",
+	"table": "dog"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "successfully deleted table 'dev.dog'"
+}
+```
+
+---
+
+## Create Attribute
+Create a new attribute within the specified table. **The create_attribute operation can be used by admins wishing to pre-define database values for setting role-based permissions or for any other reason.**
+
+_Note: Harper will automatically create new attributes on insert and update if they do not already exist within the database._
+
+* operation _(required)_ - must always be `create_attribute`
+* database _(optional)_ - name of the database containing the table you want to add your attribute to. The default is `data`
+* table _(required)_ - name of the table you want to add your attribute to
+* attribute _(required)_ - name for the attribute
+
+### Body
+```json
+{
+	"operation": "create_attribute",
+	"database": "dev",
+	"table": "dog",
+	"attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "inserted 1 of 1 records",
+	"skipped_hashes": [],
+	"inserted_hashes": [
+		"383c0bef-5781-4e1c-b5c8-987459ad0831"
+	]
+}
+```
+
+---
+
+## Drop Attribute
+Drop an existing attribute from the specified table. NOTE: Dropping an attribute will delete all associated attribute values in that table.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `drop_attribute`
+* database _(optional)_ - database where the attribute you are dropping lives. The default is `data`
+* table _(required)_ - table where the attribute you are dropping lives
+* attribute _(required)_ - attribute that you intend to drop
+
+### Body
+
+```json
+{
+	"operation": "drop_attribute",
+	"database": "dev",
+	"table": "dog",
+	"attribute": "is_adorable"
+}
+```
+
+### Response: 200
+```json
+{
+	"message": "successfully deleted attribute 'is_adorable'"
+}
+```
+
+---
+
+## Get Backup
+This will return a snapshot of the requested database. This provides a means for backing up the database through the operations API. The response will be the raw database file (in binary format), which can later be restored as a database file by copying it into the appropriate hdb/databases directory (with Harper not running). The returned file is a snapshot of the database at the moment in time that the get_backup operation begins. This also supports backing up individual tables in a database. However, that is a more expensive operation than backing up a database as a whole, and it will lose any transactional atomicity between writes across tables, so generally it is recommended that you back up the entire database.
+
+It is important to note that trying to copy a database file that is in use (Harper actively running and writing to the file) using standard file copying tools is not safe (the copied file will likely be corrupt), which is why using this snapshot operation is recommended for backups (volume snapshots are also a good way to back up Harper databases).
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - this should always be `get_backup`
+* database _(required)_ - this is the database that will be snapshotted and returned
+* table _(optional)_ - this will specify a specific table to backup
+* tables _(optional)_ - this will specify a specific set of tables to backup
+
+### Body
+
+```json
+{
+	"operation": "get_backup",
+	"database": "dev"
+}
+```
+
+### Response: 200
+```
+The database in raw binary data format
+```
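+
+Because the response body is the raw database file rather than JSON, it is typically streamed straight to disk. A minimal sketch, assuming the default operations port (the output filename is illustrative):
+
+```bash
+# Hypothetical example: snapshot the dev database and write the raw binary
+# response to a local file for a later restore.
+curl -s -X POST 'http://localhost:9925' \
+  -H 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+  -H 'Content-Type: application/json' \
+  -d '{"operation": "get_backup", "database": "dev"}' \
+  --output dev-backup.mdb
+```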
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/index.md b/site/versioned_docs/version-4.5/developers/operations-api/index.md
new file mode 100644
index 00000000..6d4c2517
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/index.md
@@ -0,0 +1,52 @@
+---
+title: Operations API
+---
+
+# Operations API
+
+The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling Harper. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../deployments/configuration#operationsapi), on the root path, where the body is the operations object. These requests need to be authenticated, which can be done with [basic auth](../security/basic-auth) or [JWT authentication](../security/jwt-auth). For example, a request to create a table would be performed as:
+
+```http
+POST http://my-harperdb-server:9925/
+Authorization: Basic YourBase64EncodedInstanceUser:Pass
+Content-Type: application/json
+
+{
+	"operation": "create_table",
+	"table": "my-table"
+}
+```
+
+The operations API reference is available below and categorized by topic:
+
+* [Quick Start Examples](./quickstart-examples)
+* [Databases and Tables](./databases-and-tables)
+* [NoSQL Operations](./nosql-operations)
+* [Bulk Operations](./bulk-operations)
+* [Users and Roles](./users-and-roles)
+* [Clustering](./clustering)
+* [Clustering with NATS](./clustering-nats)
+* [Components](./components)
+* [Registration](./registration)
+* [Jobs](./jobs)
+* [Logs](./logs)
+* [Utilities](./utilities)
+* [Token Authentication](./token-authentication)
+* [SQL Operations](./sql-operations)
+* [Advanced JSON SQL Examples](./advanced-json-sql-examples)
+* [Past Release API Documentation](https://olddocs.harperdb.io)
+
+## More Examples
+
+Here is an example of using `curl` to make an operations API request:
+
+```bash
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+"operation": "create_schema",
+"schema": "dev"
+}'
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/jobs.md b/site/versioned_docs/version-4.5/developers/operations-api/jobs.md
new file mode 100644
index 00000000..8b05357f
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/jobs.md
@@ -0,0 +1,82 @@
+---
+title: Jobs
+---
+
+# Jobs
+
+## Get Job
+Returns job status, metrics, and messages for the specified job ID.
+
+* operation _(required)_ - must always be `get_job`
+* id _(required)_ - the id of the job you wish to view
+
+### Body
+
+```json
+{
+	"operation": "get_job",
+	"id": "4a982782-929a-4507-8794-26dae1132def"
+}
+```
+
+### Response: 200
+```json
+[
+	{
+		"__createdtime__": 1611615798782,
+		"__updatedtime__": 1611615801207,
+		"created_datetime": 1611615798774,
+		"end_datetime": 1611615801206,
+		"id": "4a982782-929a-4507-8794-26dae1132def",
+		"job_body": null,
+		"message": "successfully loaded 350 of 350 records",
+		"start_datetime": 1611615798805,
+		"status": "COMPLETE",
+		"type": "csv_url_load",
+		"user": "HDB_ADMIN",
+		"start_datetime_converted": "2021-01-25T23:03:18.805Z",
+		"end_datetime_converted": "2021-01-25T23:03:21.206Z"
+	}
+]
+```
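+
+Operations such as `csv_url_load` return a job id immediately, so `get_job` is commonly polled until the job completes. A sketch of such a loop, assuming a Unix shell with `jq` available and the default operations port:
+
+```bash
+# Hypothetical polling loop: wait for an async job to reach COMPLETE.
+# (A production script would also handle the ERROR status and time out.)
+JOB_ID="4a982782-929a-4507-8794-26dae1132def"
+STATUS=""
+while [ "$STATUS" != "COMPLETE" ]; do
+  sleep 1
+  STATUS=$(curl -s -X POST 'http://localhost:9925' \
+    -H 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+    -H 'Content-Type: application/json' \
+    -d "{\"operation\": \"get_job\", \"id\": \"$JOB_ID\"}" | jq -r '.[0].status')
+done
+echo "Job $JOB_ID finished with status: $STATUS"
+```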
+
+---
+
+## Search Jobs By Start Date
+Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `search_jobs_by_start_date`
+* from_date _(required)_ - the date you wish to start the search
+* to_date _(required)_ - the date you wish to end the search
+
+### Body
+```json
+{
+	"operation": "search_jobs_by_start_date",
+	"from_date": "2021-01-25T22:05:27.464+0000",
+	"to_date": "2021-01-25T23:05:27.464+0000"
+}
+```
+
+### Response: 200
+```json
+[
+	{
+		"id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1",
+		"user": "HDB_ADMIN",
+		"type": "csv_url_load",
+		"status": "COMPLETE",
+		"start_datetime": 1611613284781,
+		"end_datetime": 1611613287204,
+		"job_body": null,
+		"message": "successfully loaded 350 of 350 records",
+		"created_datetime": 1611613284764,
+		"__createdtime__": 1611613284767,
+		"__updatedtime__": 1611613287207,
+		"start_datetime_converted": "2021-01-25T22:21:24.781Z",
+		"end_datetime_converted": "2021-01-25T22:21:27.204Z"
+	}
+]
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/logs.md b/site/versioned_docs/version-4.5/developers/operations-api/logs.md
new file mode 100644
index 00000000..b2b0e2b6
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/logs.md
@@ -0,0 +1,768 @@
+---
+title: Logs
+---
+
+# Logs
+
+## Read Harper Log
+
+Returns log outputs from the primary Harper log based on the provided search criteria. [Read more about Harper logging here](../../administration/logging/standard-logging#read-logs-via-the-api).
+
+_Operation is restricted to super\_user roles only_
+
+* operation _(required)_ - must always be `read_log`
+* start _(optional)_ - result to start with. Default is 0, the first log in `hdb.log`. Must be a number
+* limit _(optional)_ - number of results returned. Default is 1000. Must be a number
+* level _(optional)_ - error level to filter on. Default behavior is all levels. Must be `notify`, `error`, `warn`, `info`, `debug` or `trace`
+* from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is first log in `hdb.log`
+* until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is last log in `hdb.log`
+* order _(optional)_ - order to display logs desc or asc by timestamp. By default, will maintain `hdb.log` order
+
+### Body
+
+```json
+{
+	"operation": "read_log",
+	"start": 0,
+	"limit": 1000,
+	"level": "error",
+	"from": "2021-01-25T22:05:27.464+0000",
+	"until": "2021-01-25T23:05:27.464+0000",
+	"order": "desc"
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"level": "notify",
+		"message": "Connected to cluster server.",
+		"timestamp": "2021-01-25T23:03:20.710Z",
+		"thread": "main/0",
+		"tags": []
+	},
+	{
+		"level": "warn",
+		"message": "Login failed",
+		"timestamp": "2021-01-25T22:24:45.113Z",
+		"thread": "http/9",
+		"tags": []
+	},
+	{
+		"level": "error",
+		"message": "unknown attribute 'name and breed'",
+		"timestamp": "2021-01-25T22:23:24.167Z",
+		"thread": "http/9",
+		"tags": []
+	}
+]
+```
+
+***
+
+## Read Transaction Log
+
+Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. [Read more about Harper transaction logs here](./logs#read-transaction-log).
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_transaction_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* from _(optional)_ - time format must be millisecond-based epoch in UTC +* to _(optional)_ - time format must be millisecond-based epoch in UTC +* limit _(optional)_ - max number of logs you want to receive. Must be a number + +### Body + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1560249020865, + "to": 1660585656639, + "limit": 10 +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619813, + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + 
"__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +*** + +## Delete Transaction Logs Before + +Deletes transaction log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `delete_transaction_log_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126" +} +``` + +*** + +## Read Audit Log + +AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search\_type and search\_values fields. [Read more about Harper transaction logs here.](../../administration/logging/transaction-logging#read_transaction_log) + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username` +* search\_values _(optional)_ - an array of string or numbers relating to search\_type + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + } +] +``` + +*** + +## Read Audit Log by timestamp + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table between the specified time window. 
[Read more about Harper transaction logs here](./logs#read-transaction-log). + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - timestamp +* search\_values _(optional)_ - an array containing a maximum of two values \[`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. + * Timestamp format is millisecond-based epoch in UTC + * If no items are supplied then all transactions are returned + * If only one entry is supplied then all transactions after the supplied timestamp will be returned + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [ + 1660585740558, + 1660585759710.56 + ] +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } +] +``` + +*** + +## Read Audit Log by username + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - username +* search\_values _(optional)_ - the Harper user for whom you would like to view transactions + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": [ + "admin" + ] +} +``` + +### Response: 200 + +```json +{ + "admin": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [ + 444 + ], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [ + 444 + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [ + 318 + ], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +*** + +## Read Audit Log by hash\_value + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
+ +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `read_audit_log` +* schema _(required)_ - schema under which the transaction log resides +* table _(required)_ - table under which the transaction log resides +* search\_type _(optional)_ - hash\_value +* search\_values _(optional)_ - an array of hash\_attributes for which you wish to see transaction logs + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [ + 318 + ] +} +``` + +### Response: 200 + +```json +{ + "318": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +*** + +## Delete Audit Logs Before + +AuditLog must be enabled in the Harper configuration file to make this request. Deletes audit log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super\_user roles only_ + +* operation _(required)_ - must always be `delete_audit_logs_before` +* schema _(required)_ - schema under which the transaction log resides. Must be a string +* table _(required)_ - table under which the transaction log resides. Must be a string +* timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1660585759710.56 +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" +} +``` diff --git a/site/versioned_docs/version-4.5/developers/operations-api/nosql-operations.md b/site/versioned_docs/version-4.5/developers/operations-api/nosql-operations.md new file mode 100644 index 00000000..77959204 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/nosql-operations.md @@ -0,0 +1,413 @@ +--- +title: NoSQL Operations +--- + +# NoSQL Operations + +## Insert + +Adds one or more rows of data to a database table. Primary keys of the inserted JSON record may be supplied on insert. If a primary key is not provided, then a GUID or incremented number (depending on type) will be generated for each record. + +* operation _(required)_ - must always be `insert` +* database _(optional)_ - database where the table you are inserting records into lives. 
The default is `data`
+* table _(required)_ - table where you want to insert records
+* records _(required)_ - array of one or more records for insert
+
+### Body
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 8,
+			"dog_name": "Harper",
+			"breed_id": 346,
+			"age": 7
+		},
+		{
+			"id": 9,
+			"dog_name": "Penny",
+			"breed_id": 154,
+			"age": 7
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "inserted 2 of 2 records",
+	"inserted_hashes": [
+		8,
+		9
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Update
+
+Changes the values of specified attributes in one or more rows in a database table as identified by the primary key. NOTE: Primary key of the updated JSON record(s) MUST be supplied on update.
+
+* operation _(required)_ - must always be `update`
+* database _(optional)_ - database of the table you are updating records in. The default is `data`
+* table _(required)_ - table where you want to update records
+* records _(required)_ - array of one or more records for update
+
+### Body
+
+```json
+{
+	"operation": "update",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 1,
+			"weight_lbs": 55
+		},
+		{
+			"id": 2,
+			"owner": "Kyle B",
+			"weight_lbs": 35
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "updated 2 of 2 records",
+	"update_hashes": [
+		1,
+		2
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Upsert
+
+Changes the values of specified attributes for rows with matching primary keys that exist in the table. Adds rows to the database table for primary keys that do not exist or are not provided.
+
+* operation _(required)_ - must always be `upsert`
+* database _(optional)_ - database of the table you are updating records in. The default is `data`
+* table _(required)_ - table where you want to update records
+* records _(required)_ - array of one or more records for update
+
+### Body
+
+```json
+{
+	"operation": "upsert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{
+			"id": 8,
+			"weight_lbs": 155
+		},
+		{
+			"name": "Bill",
+			"breed": "Pit Bull",
+			"id": 10,
+			"Age": 11,
+			"weight_lbs": 155
+		},
+		{
+			"name": "Harper",
+			"breed": "Mutt",
+			"age": 5,
+			"weight_lbs": 155
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "upserted 3 of 3 records",
+	"upserted_hashes": [
+		8,
+		10,
+		"ea06fc8e-717b-4c6c-b69d-b29014054ab7"
+	]
+}
+```
+
+---
+
+## Delete
+
+Removes one or more rows of data from a specified table.
+
+* operation _(required)_ - must always be `delete`
+* database _(optional)_ - database where the table you are deleting records lives. The default is `data`
+* table _(required)_ - table where you want to delete records
+* ids _(required)_ - array of one or more primary key values, which identifies records to delete
+
+### Body
+
+```json
+{
+	"operation": "delete",
+	"database": "dev",
+	"table": "dog",
+	"ids": [
+		1,
+		2
+	]
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "2 of 2 records successfully deleted",
+	"deleted_hashes": [
+		1,
+		2
+	],
+	"skipped_hashes": []
+}
+```
+
+---
+
+## Search By ID
+
+Returns data from a table for one or more primary keys.
+
+* operation _(required)_ - must always be `search_by_id`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* ids _(required)_ - array of primary keys to retrieve
+* get_attributes _(required)_ - define which attributes you want returned.
_Use `['*']` to return all attributes_
+
+### Body
+
+```json
+{
+	"operation": "search_by_id",
+	"database": "dev",
+	"table": "dog",
+	"ids": [
+		1,
+		2
+	],
+	"get_attributes": [
+		"dog_name",
+		"breed_id"
+	]
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"dog_name": "Penny",
+		"breed_id": 154
+	},
+	{
+		"dog_name": "Harper",
+		"breed_id": 346
+	}
+]
+```
+
+---
+
+## Search By Value
+
+Returns data from a table for a matching value.
+
+* operation _(required)_ - must always be `search_by_value`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* search_attribute _(required)_ - attribute you wish to search, can be any attribute
+* search_value _(required)_ - value you wish to search - wildcards are allowed
+* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+
+### Body
+
+```json
+{
+	"operation": "search_by_value",
+	"database": "dev",
+	"table": "dog",
+	"search_attribute": "owner_name",
+	"search_value": "Ky*",
+	"get_attributes": [
+		"id",
+		"dog_name"
+	]
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"id": 1,
+		"dog_name": "Penny"
+	},
+	{
+		"id": 6,
+		"dog_name": "Kato"
+	}
+]
+```
+
+---
+
+## Search By Conditions
+
+Returns data from a table for one or more matching conditions. This supports grouping of conditions to indicate order of operations as well.
+
+* operation _(required)_ - must always be `search_by_conditions`
+* database _(optional)_ - database where the table you are searching lives. The default is `data`
+* table _(required)_ - table you wish to search
+* operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+* offset _(optional)_ - the number of records that the query results will skip. The default is `0`
+* limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit
+* sort _(optional)_ - this is an object that indicates the sort order. It has the following properties:
+  * attribute _(required)_ - the attribute to sort by
+  * descending _(optional)_ - if true, will sort in descending order (defaults to ascending order)
+  * next _(optional)_ - this can define the next sort object that will be used to break ties for sorting when there are multiple records with the same value for the first attribute (follows the same structure as `sort`, and can recursively define additional sort attributes)
+* get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+* conditions _(required)_ - the array of conditions objects, specified below, to filter by. Must include one or more objects in the array that are a condition or a grouped set of conditions. A condition has the following properties:
+  * search_attribute _(required)_ - the attribute you wish to search, can be any attribute
+  * search_type _(required)_ - the type of search to perform - `equals`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between`
+  * search_value _(required)_ - case-sensitive value you wish to search. If the `search_type` is `between` then use an array of two values to search between
+
+  Or a grouped set of conditions has the following properties:
+  * operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+  * conditions _(required)_ - the array of conditions objects as described above.
+### Body
+
+```json
+{
+	"operation": "search_by_conditions",
+	"database": "dev",
+	"table": "dog",
+	"operator": "and",
+	"offset": 0,
+	"limit": 10,
+	"sort": {
+		"attribute": "id",
+		"next": {
+			"attribute": "age",
+			"descending": true
+		}
+	},
+	"get_attributes": [
+		"*"
+	],
+	"conditions": [
+		{
+			"search_attribute": "age",
+			"search_type": "between",
+			"search_value": [
+				5,
+				8
+			]
+		},
+		{
+			"search_attribute": "weight_lbs",
+			"search_type": "greater_than",
+			"search_value": 40
+		},
+		{
+			"operator": "or",
+			"conditions": [
+				{
+					"search_attribute": "adorable",
+					"search_type": "equals",
+					"search_value": true
+				},
+				{
+					"search_attribute": "lovable",
+					"search_type": "equals",
+					"search_value": true
+				}
+			]
+		}
+	]
+}
+```
+
+### Response: 200
+
+```json
+[
+	{
+		"__createdtime__": 1620227719791,
+		"__updatedtime__": 1620227719791,
+		"adorable": true,
+		"age": 7,
+		"breed_id": 346,
+		"dog_name": "Harper",
+		"id": 2,
+		"owner_name": "Stephen",
+		"weight_lbs": 55
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 7,
+		"breed_id": 348,
+		"dog_name": "Alby",
+		"id": 3,
+		"owner_name": "Kaylan",
+		"weight_lbs": 84
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 6,
+		"breed_id": 347,
+		"dog_name": "Billy",
+		"id": 4,
+		"owner_name": "Zach",
+		"weight_lbs": 60
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 5,
+		"breed_id": 250,
+		"dog_name": "Gemma",
+		"id": 8,
+		"owner_name": "Stephen",
+		"weight_lbs": 55
+	},
+	{
+		"__createdtime__": 1620227719792,
+		"__updatedtime__": 1620227719792,
+		"adorable": true,
+		"age": 8,
+		"breed_id": 104,
+		"dog_name": "Bode",
+		"id": 11,
+		"owner_name": "Margo",
+		"weight_lbs": 75
+	}
+]
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/quickstart-examples.md b/site/versioned_docs/version-4.5/developers/operations-api/quickstart-examples.md
new file mode 100644
index 00000000..9159efca
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/quickstart-examples.md
@@ -0,0 +1,387 @@
+---
+title: Quick Start Examples
+---
+
+# Quick Start Examples
+
+Harper recommends utilizing [Harper Applications](../../developers/applications/) for defining databases, tables, and other functionality. However, this guide is a great way to get started using the Harper Operations API.
+
+## Create dog Table
+
+We first need to create a table. Since our company is named after our CEO's dog, let's create a table to store all our employees' dogs. We'll call this table `dog`.
+
+Tables in Harper are schema-less, so we don't need to add any attributes other than a primary_key (in pre-4.2 versions this was referred to as the hash_attribute) to create this table.
+
+Harper does offer a `database` parameter that can be used to hold logical groupings of tables. The parameter is optional and if not provided the operation will default to using a database named `data`.
+
+If you receive an error response, make sure your Basic Authentication user and password match those you entered during the installation process.
+
+### Body
+
+```json
+{
+	"operation": "create_table",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+### Response: 200
+
+```json
+{
+	"message": "table 'data.dog' successfully created."
+}
+```
+
+---
+
+## Create breed Table
+Now that we have a table to store our dog data, we also want to create a table to track known breeds.
Just as with the dog table, the only attribute we need to specify is the `primary_key`. + +### Body + +```json +{ + "operation": "create_table", + "table": "breed", + "primary_key": "id" +} +``` + +### Response: 200 + +```json +{ + "message": "table 'data.breed' successfully created." +} +``` + +--- + +## Insert 1 Dog + +We're ready to add some dog data. Penny is our CTO's pup, so she gets ID 1 or we're all fired. We are specifying attributes in this call, but this doesn't prevent us from specifying additional attributes in subsequent calls. + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [ + 1 + ], + "skipped_hashes": [] +} +``` + +--- + +## Insert Multiple Dogs + +Let's add some more Harper doggies! We can add as many dog objects as we want into the records collection. If you're adding a lot of objects, we would recommend using the .csv upload option (see the next section where we populate the breed table). + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 12 of 12 records", + "inserted_hashes": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13 + ], + "skipped_hashes": [] +} +``` + +--- + +## Bulk Insert Breeds Via CSV + +We need to populate the 'breed' table with some data so we can reference it later. For larger data sets, we recommend using our CSV upload option. + +Each header in a column will be considered as an attribute, and each row in the file will be a row in the table. Simply specify the file path and the table to upload to, and Harper will take care of the rest. 
You can pull the breeds.csv file from here: https://s3.amazonaws.com/complimentarydata/breeds.csv
+
+### Body
+
+```json
+{
+  "operation": "csv_url_load",
+  "table": "breed",
+  "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369",
+  "job_id": "e77d63b9-70d5-499c-960f-6736718a4369"
+}
+```
+
+---
+
+## Update 1 Dog Using NoSQL
+
+Harper supports NoSQL and SQL commands. We're going to update the dog table to show Penny's last initial using our NoSQL API.
+
+### Body
+
+```json
+{
+  "operation": "update",
+  "table": "dog",
+  "records": [
+    {
+      "id": 1,
+      "dog_name": "Penny B"
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "updated 1 of 1 records",
+  "update_hashes": [
+    1
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Select a Dog by ID Using SQL
+
+Now we're going to use a simple SQL SELECT call to pull Penny's updated data. Note we now see Penny's last initial in the dog name.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT * FROM data.dog WHERE id = 1"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "owner_name": "Kyle",
+    "adorable": null,
+    "breed_id": 154,
+    "__updatedtime__": 1610749428575,
+    "dog_name": "Penny B",
+    "weight_lbs": 38,
+    "id": 1,
+    "age": 7,
+    "__createdtime__": 1610749386566
+  }
+]
+```
+
+---
+
+## Select Dogs and Join Breed
+
+Here's a more complex SQL command joining the breed table with the dog table. We will also pull only the pups belonging to Kyle, Zach, and Stephen.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "id": 4,
+    "dog_name": "Billy",
+    "owner_name": "Zach",
+    "name": "LABRADOR / GREAT DANE MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 8,
+    "dog_name": "Gemma",
+    "owner_name": "Stephen",
+    "name": "SHORT HAIRED SETTER MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 2,
+    "dog_name": "Harper",
+    "owner_name": "Stephen",
+    "name": "HUSKY MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 5,
+    "dog_name": "Rose Merry",
+    "owner_name": "Zach",
+    "name": "TERRIER MIX",
+    "section": "Mutt"
+  }
+]
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/registration.md b/site/versioned_docs/version-4.5/developers/operations-api/registration.md
new file mode 100644
index 00000000..366b0189
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/registration.md
@@ -0,0 +1,67 @@
+---
+title: Registration
+---
+
+# Registration
+
+## Registration Info
+Returns the registration data of the Harper instance.
+
+* operation _(required)_ - must always be `registration_info`
+
+### Body
+```json
+{
+  "operation": "registration_info"
+}
+```
+
+### Response: 200
+```json
+{
+  "registered": true,
+  "version": "4.2.0",
+  "ram_allocation": 2048,
+  "license_expiration_date": "2022-01-15"
+}
+```
+
+---
+
+## Get Fingerprint
+Returns the Harper fingerprint, uniquely generated based on the machine, for licensing purposes.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_fingerprint`
+
+### Body
+
+```json
+{
+  "operation": "get_fingerprint"
+}
+```
+
+---
+
+## Set License
+Sets the Harper license as generated by Harper License Management software.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_license`
+* key _(required)_ - your license key
+* company _(required)_ - the company that was used in the license
+
+### Body
+
+```json
+{
+  "operation": "set_license",
+  "key": "",
+  "company": ""
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/sql-operations.md b/site/versioned_docs/version-4.5/developers/operations-api/sql-operations.md
new file mode 100644
index 00000000..9fcc6fb4
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/sql-operations.md
@@ -0,0 +1,122 @@
+---
+title: SQL Operations
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL Operations
+
+## Select
+Executes the provided SQL statement. The SELECT statement is used to query data from the database.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT * FROM dev.dog WHERE id = 1"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "id": 1,
+    "age": 7,
+    "dog_name": "Penny",
+    "weight_lbs": 38,
+    "breed_id": 154,
+    "owner_name": "Kyle",
+    "adorable": true,
+    "__createdtime__": 1611614106043,
+    "__updatedtime__": 1611614119507
+  }
+]
+```
+
+---
+
+## Insert
+Executes the provided SQL statement. The INSERT statement is used to add one or more rows to a database table.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "INSERT INTO dev.dog (id, dog_name) VALUES (22, 'Simon')"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "inserted 1 of 1 records",
+  "inserted_hashes": [
+    22
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Update
+Executes the provided SQL statement. The UPDATE statement is used to change the values of specified attributes in one or more rows in a database table.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+```json
+{
+  "operation": "sql",
+  "sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "updated 1 of 1 records",
+  "update_hashes": [
+    1
+  ],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Delete
+Executes the provided SQL statement. The DELETE statement is used to remove one or more rows of data from a database table.
+
+* operation _(required)_ - must always be `sql`
+* sql _(required)_ - use standard SQL
+
+### Body
+```json
+{
+  "operation": "sql",
+  "sql": "DELETE FROM dev.dog WHERE id = 1"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "1 of 1 record successfully deleted",
+  "deleted_hashes": [
+    1
+  ],
+  "skipped_hashes": []
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/operations-api/token-authentication.md b/site/versioned_docs/version-4.5/developers/operations-api/token-authentication.md
new file mode 100644
index 00000000..161c69b5
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/operations-api/token-authentication.md
@@ -0,0 +1,54 @@
+---
+title: Token Authentication
+---
+
+# Token Authentication
+
+## Create Authentication Tokens
+Creates the tokens needed for authentication: an operation token and a refresh token.
+ +_Note - this operation does not require authorization to be set_ + +* operation _(required)_ - must always be `create_authentication_tokens` +* username _(required)_ - username of user to generate tokens for +* password _(required)_ - password of user to generate tokens for + +### Body +```json +{ + "operation": "create_authentication_tokens", + "username": "", + "password": "" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", + "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" +} +``` + +--- + +## Refresh Operation Token +This operation creates a new operation token. 
+ +* operation _(required)_ - must always be `refresh_operation_token` +* refresh_token _(required)_ - the refresh token that was provided when tokens were created + +### Body +```json +{ + "operation": "refresh_operation_token", + "refresh_token": "EXISTING_REFRESH_TOKEN" +} +``` + +### Response: 200 +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-sEUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg" +} +``` diff --git a/site/versioned_docs/version-4.5/developers/operations-api/users-and-roles.md b/site/versioned_docs/version-4.5/developers/operations-api/users-and-roles.md new file mode 100644 index 00000000..d95f3ad9 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/users-and-roles.md @@ -0,0 +1,484 @@ +--- +title: Users and Roles +--- + +# Users and Roles + +## List Roles +Returns a list of all roles. 
[Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `list_roles`
+
+### Body
+```json
+{
+  "operation": "list_roles"
+}
+```
+
+### Response: 200
+```json
+[
+  {
+    "__createdtime__": 1611615061106,
+    "__updatedtime__": 1611615061106,
+    "id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890",
+    "permission": {
+      "super_user": false,
+      "dev": {
+        "tables": {
+          "dog": {
+            "read": true,
+            "insert": true,
+            "update": true,
+            "delete": false,
+            "attribute_permissions": [
+              {
+                "attribute_name": "name",
+                "read": true,
+                "insert": true,
+                "update": true
+              }
+            ]
+          }
+        }
+      }
+    },
+    "role": "developer"
+  },
+  {
+    "__createdtime__": 1610749235614,
+    "__updatedtime__": 1610749235614,
+    "id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564",
+    "permission": {
+      "cluster_user": true
+    },
+    "role": "cluster_user"
+  },
+  {
+    "__createdtime__": 1610749235609,
+    "__updatedtime__": 1610749235609,
+    "id": "745b3138-a7cf-455a-8256-ac03722eef12",
+    "permission": {
+      "super_user": true
+    },
+    "role": "super_user"
+  }
+]
+```
+
+---
+
+## Add Role
+Creates a new role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `add_role`
+* role _(required)_ - name of role you are defining
+* permission _(required)_ - object defining permissions for users associated with this role:
+    * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false.
+    * structure_user _(optional)_ - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true.
+
+### Body
+```json
+{
+  "operation": "add_role",
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  },
+  "id": "0a9368b0-bd81-482f-9f5a-8722e3582f96",
+  "__updatedtime__": 1598549532897,
+  "__createdtime__": 1598549532897
+}
+```
+
+---
+
+## Alter Role
+Modifies an existing role with the specified permissions.
[Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `alter_role` +* id _(required)_ - the id value for the role you are altering +* role _(optional)_ - name value to update on the role you are altering +* permission _(required)_ - object defining permissions for users associated with this role: + * super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, value will be assumed to be false. + * structure_user (optional) - boolean OR array of database names (as strings). If boolean, user can create new databases and tables. If array of strings, users can only manage tables within the specified databases. This overrides any individual table permissions for specified databases, or for all databases if the value is true. + +### Body + +```json +{ + "operation": "alter_role", + "id": "f92162e2-cd17-450c-aae0-372a76859038", + "role": "another_developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + } +} +``` + +### Response: 200 +```json +{ + "id": "a7cb91e9-32e4-4dbf-a327-fab4fa9191ea", + "role": "developer", + "permission": { + "super_user": false, + "structure_user": false, + "dev": { + "tables": { + "dog": { + "read": true, + "insert": true, + "update": true, + "delete": false, + "attribute_permissions": [ + { + "attribute_name": "name", + "read": false, + "insert": true, + "update": true + } + ] + } + } + } + }, + "__updatedtime__": 1598549996106 +} +``` + +--- + +## Drop Role +Deletes an existing role from the database. NOTE: Role with associated users cannot be dropped. [Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - this must always be `drop_role` +* id _(required)_ - this is the id of the role you are dropping + +### Body +```json +{ + "operation": "drop_role", + "id": "developer" +} +``` + +### Response: 200 +```json +{ + "message": "developer successfully deleted" +} +``` + +--- + +## List Users +Returns a list of all users. 
[Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `list_users` + +### Body +```json +{ + "operation": "list_users" +} +``` + +### Response: 200 +```json +[ + { + "__createdtime__": 1635520961165, + "__updatedtime__": 1635520961165, + "active": true, + "role": { + "__createdtime__": 1635520961161, + "__updatedtime__": 1635520961161, + "id": "7c78ef13-c1f3-4063-8ea3-725127a78279", + "permission": { + "super_user": true, + "system": { + "tables": { + "hdb_table": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_attribute": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_schema": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_user": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_role": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_job": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_license": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_info": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_nodes": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_temp": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + } + } + } + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" + } +] +``` + +--- + +## User Info +Returns user data for the associated user credentials. + +* operation _(required)_ - must always be `user_info` + +### Body +```json +{ + "operation": "user_info" +} +``` + +### Response: 200 +```json +{ + "__createdtime__": 1610749235611, + "__updatedtime__": 1610749235611, + "active": true, + "role": { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" +} +``` + +--- + +## Add User +Creates a new user with the specified role and credentials. [Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_user` +* role _(required)_ - 'role' name value of the role you wish to assign to the user. See `add_role` for more detail +* username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash +* password _(required)_ - clear text for password. Harper will encrypt the password upon receipt +* active _(required)_ - boolean value for status of user's access to your Harper instance. If set to false, user will not be able to access your instance of Harper. + +### Body +```json +{ + "operation": "add_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 +```json +{ + "message": "hdb_user successfully added" +} +``` + +--- + +## Alter User +Modifies an existing user's role and/or credentials. 
[Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super\_user roles only_ + + * operation _(required)_ - must always be `alter_user` + * username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash. + * password _(optional)_ - clear text for password. Harper will encrypt the password upon receipt + * role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail + * active _(optional)_ - status of user's access to your Harper instance. See `add_role` for more detail + +### Body +```json +{ + "operation": "alter_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 +```json +{ + "message": "updated 1 of 1 records", + "new_attributes": [], + "txn_time": 1611615114397.988, + "update_hashes": [ + "hdb_user" + ], + "skipped_hashes": [] +} +``` + +--- + +## Drop User +Deletes an existing user by username. [Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `drop_user` +* username _(required)_ - username assigned to the user + +### Body +```json +{ + "operation": "drop_user", + "username": "sgoldberg" +} +``` + +### Response: 200 +```json +{ + "message": "sgoldberg successfully deleted" +} +``` diff --git a/site/versioned_docs/version-4.5/developers/operations-api/utilities.md b/site/versioned_docs/version-4.5/developers/operations-api/utilities.md new file mode 100644 index 00000000..4d09f5cd --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/operations-api/utilities.md @@ -0,0 +1,442 @@ +--- +title: Utilities +--- + +# Utilities + +## Restart +Restarts the Harper instance. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `restart` + +### Body +```json +{ + "operation": "restart" +} +``` + +### Response: 200 +```json +{ + "message": "Restarting HarperDB. This may take up to 60 seconds." +} +``` +--- + +## Restart Service +Restarts servers for the specified Harper service. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `restart_service` +* service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering` +* replicated _(optional)_ - must be a boolean. If set to `true`, Harper will replicate the restart service operation across all nodes in the cluster. The restart will occur as a rolling restart, ensuring that each node is fully restarted before the next node begins restarting. + +### Body +```json +{ + "operation": "restart_service", + "service": "http_workers" +} +``` + +### Response: 200 +```json +{ + "message": "Restarting http_workers" +} +``` + +--- +## System Information +Returns detailed metrics on the host system. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `system_information` +* attributes _(optional)_ - string array of top level attributes desired in the response, if no value is supplied all attributes will be returned. 
Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'metrics', 'threads', 'replication']
+
+### Body
+```json
+{
+  "operation": "system_information"
+}
+```
+
+---
+
+## Delete Records Before
+
+Deletes data before the specified timestamp on the specified database table, exclusively on the node where it is executed. Any clustered nodes with replicated data will retain that data.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `delete_records_before`
+* date _(required)_ - records older than this date will be deleted. Supported format looks like: `YYYY-MM-DDThh:mm:ss.sZ`
+* schema _(required)_ - name of the schema where you are deleting your data
+* table _(required)_ - name of the table where you are deleting your data
+
+### Body
+```json
+{
+  "operation": "delete_records_before",
+  "date": "2021-01-25T23:05:27.464",
+  "schema": "dev",
+  "table": "breed"
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373",
+  "job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373"
+}
+```
+
+---
+
+## Export Local
+Exports data based on a given search operation to a local file in JSON or CSV format.
+
+* operation _(required)_ - must always be `export_local`
+* format _(required)_ - the format you wish to export the data in; options are `json` and `csv`
+* path _(required)_ - path local to the server to export the data
+* search_operation _(required)_ - a search operation: `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql`
+* filename _(optional)_ - the name of the file your export will be written to (do not include the extension in the filename). If one is not provided, it will be autogenerated based on the epoch.
+
+### Body
+```json
+{
+  "operation": "export_local",
+  "format": "json",
+  "path": "/data/",
+  "search_operation": {
+    "operation": "sql",
+    "sql": "SELECT * FROM dev.breed"
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5"
+}
+```
+
+---
+
+## Export To S3
+Exports data based on a given search operation from a table to AWS S3 in JSON or CSV format.
+
+* operation _(required)_ - must always be `export_to_s3`
+* format _(required)_ - the format you wish to export the data in; options are `json` and `csv`
+* s3 _(required)_ - details your access keys, bucket, bucket region and key for saving the data to S3
+* search_operation _(required)_ - a search operation: `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql`
+
+### Body
+```json
+{
+  "operation": "export_to_s3",
+  "format": "json",
+  "s3": {
+    "aws_access_key_id": "YOUR_KEY",
+    "aws_secret_access_key": "YOUR_SECRET_KEY",
+    "bucket": "BUCKET_NAME",
+    "key": "OBJECT_NAME",
+    "region": "BUCKET_REGION"
+  },
+  "search_operation": {
+    "operation": "sql",
+    "sql": "SELECT * FROM dev.dog"
+  }
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a",
+  "job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a"
+}
+```
+
+---
+
+## Install Node Modules
+This operation is deprecated, as it is handled automatically by deploy_component and restart.
+Executes npm install against specified custom function projects.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `install_node_modules`
+* projects _(required)_ - must be an array of custom function projects.
+* dry_run _(optional)_ - refers to the npm `--dry-run` flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false.
+
+### Body
+```json
+{
+  "operation": "install_node_modules",
+  "projects": [
+    "dogs",
+    "cats"
+  ],
+  "dry_run": true
+}
+```
+
+---
+
+## Set Configuration
+
+Modifies the Harper configuration file parameters. Must be followed by a restart or restart_service operation for the changes to take effect.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `set_configuration`
+* logging_level _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+* clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+
+### Body
+```json
+{
+  "operation": "set_configuration",
+  "logging_level": "trace",
+  "clustering_enabled": true
+}
+```
+
+### Response: 200
+```json
+{
+  "message": "Configuration successfully set. You must restart HarperDB for new config settings to take effect."
+}
+```
+
+---
+
+## Get Configuration
+Returns the Harper configuration parameters.
+
+_Operation is restricted to super_user roles only_
+
+* operation _(required)_ - must always be `get_configuration`
+
+### Body
+```json
+{
+  "operation": "get_configuration"
+}
+```
+
+### Response: 200
+```json
+{
+  "http": {
+    "compressionThreshold": 1200,
+    "cors": false,
+    "corsAccessList": [
+      null
+    ],
+    "keepAliveTimeout": 30000,
+    "port": 9926,
+    "securePort": null,
+    "timeout": 120000
+  },
+  "threads": 11,
+  "authentication": {
+    "cacheTTL": 30000,
+    "enableSessions": true,
+    "operationTokenTimeout": "1d",
+    "refreshTokenTimeout": "30d"
+  },
+  "analytics": {
+    "aggregatePeriod": 60
+  },
+  "replication": {
+    "hostname": "node1",
+    "databases": "*",
+    "routes": null,
+    "url": "wss://127.0.0.1:9925"
+  },
+  "componentsRoot": "/Users/hdb/components",
+  "localStudio": {
+    "enabled": false
+  },
+  "logging": {
+    "auditAuthEvents": {
+      "logFailed": false,
+      "logSuccessful": false
+    },
+    "auditLog": true,
+    "auditRetention": "3d",
+    "file": true,
+    "level": "error",
+    "root": "/Users/hdb/log",
+    "rotation": {
+      "enabled": false,
+      "compress": false,
+      "interval": null,
+      "maxSize": null,
+      "path": "/Users/hdb/log"
+    },
+    "stdStreams": false
+  },
+  "mqtt": {
+    "network": {
+      "port": 1883,
+      "securePort": 8883
+    },
+    "webSocket": true,
+    "requireAuthentication": true
+  },
+  "operationsApi": {
+    "network": {
+      "cors": true,
+      "corsAccessList": [
+        "*"
+      ],
+      "domainSocket": "/Users/hdb/operations-server",
+      "port": 9925,
+      "securePort": null
+    }
+  },
+  "rootPath": "/Users/hdb",
+  "storage": {
+    "writeAsync": false,
+    "caching": true,
+    "compression": false,
+    "noReadAhead": true,
+    "path": "/Users/hdb/database",
+    "prefetchWrites": true
+  },
+  "tls": {
+    "privateKey": "/Users/hdb/keys/privateKey.pem"
+  }
+}
+```
+
+---
+
+## Add Certificate
+
+Adds or updates a certificate in the `hdb_certificate` system table.
+If a `private_key` is provided, it will __not__ be stored in `hdb_certificate`; it will be written to file in `/keys/`.
+If a `private_key` is not passed, the operation will search for one that matches the certificate. If one is not found, an error will be returned.
+ +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `add_certificate` +* name _(required)_ - a unique name for the certificate +* certificate _(required)_ - a PEM formatted certificate string +* is_authority _(required)_ - a boolean indicating if the certificate is a certificate authority +* hosts _(optional)_ - an array of hostnames that the certificate is valid for +* private_key _(optional)_ - a PEM formatted private key string + +### Body +```json +{ + "operation": "add_certificate", + "name": "my-cert", + "certificate": "-----BEGIN CERTIFICATE-----ZDFAay... -----END CERTIFICATE-----", + "is_authority": false, + "private_key": "-----BEGIN RSA PRIVATE KEY-----Y4dMpw5f... -----END RSA PRIVATE KEY-----" +} +``` + +### Response: 200 +```json +{ + "message": "Successfully added certificate: my-cert" +} +``` + +--- + +## Remove Certificate + +Removes a certificate from the `hdb_certificate` system table and deletes the corresponding private key file. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `remove_certificate` +* name _(required)_ - the name of the certificate + +### Body +```json +{ + "operation": "remove_certificate", + "name": "my-cert" +} +``` + +### Response: 200 +```json +{ + "message": "Successfully removed my-cert" +} +``` + +--- + +## List Certificates + +Lists all certificates in the `hdb_certificate` system table. + +_Operation is restricted to super_user roles only_ + +* operation _(required)_ - must always be `list_certificates` + +### Body +```json +{ + "operation": "list_certificates" +} +``` + +### Response: 200 +```json +[ + { + "name": "HarperDB-Certificate-Authority-node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\nTANBgkqhk... S34==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": true, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "serial_number": "5235345", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": [ + "https", + "wss" + ] + }, + { + "name": "node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\ngIEcSR1M... 5bv==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": false, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=node.1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject_alt_name": "IP Address:127.0.0.1, DNS:localhost, IP Address:0:0:0:0:0:0:0:1, DNS:node.1", + "serial_number": "5243646", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": [ + "https", + "wss" + ] + } +] +``` diff --git a/site/versioned_docs/version-4.5/developers/real-time.md b/site/versioned_docs/version-4.5/developers/real-time.md new file mode 100644 index 00000000..5f90e075 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/real-time.md @@ -0,0 +1,175 @@ +--- +title: Real-Time +--- + +# Real-Time + +## Real-Time + +Harper provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time as well as handling data-oriented messaging. 
Harper supports multiple standardized protocols to facilitate diverse standards-based client interaction.
+
+Harper real-time communication is based around database tables. Declared tables are the basis for monitoring data, and for defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a Harper application folder](./applications/):
+```graphql
+type MyTopic @table @export
+```
+You can then subscribe to records or sub-topics in this topic/namespace, as well as save data and publish messages, with the protocols discussed below.
+
+### Content Negotiation
+
+Harper is a database, not a generic broker, and is therefore highly adept at handling _structured_ data. Data can be published and subscribed in all supported structured/object formats, including JSON, CBOR, and MessagePack, and the data will be stored and handled as structured data. This means that different clients can individually choose which format they prefer, both for inbound and outbound messages. One client could publish in JSON, and another client could choose to receive messages in CBOR.
+
+## Protocols
+
+### MQTT
+
+Harper supports MQTT as an interface to this real-time data delivery. It is important to note that MQTT in Harper is not just a generic pub/sub hub, but is deeply integrated with the database, providing subscriptions directly to database records and publishing to those records. In this document we will explain how MQTT pub/sub concepts are aligned and integrated with database functionality.
+
+#### Configuration
+
+Harper supports MQTT with its `mqtt` server module, over standard TCP sockets or over WebSockets. This is enabled by default, but can be configured in your `harperdb-config.yaml` configuration, allowing you to change which ports it listens on, whether secure TLS connections are used, and whether MQTT is accepted over WebSockets:
+
+```yaml
+mqtt:
+  network:
+    port: 1883
+    securePort: 8883 # for TLS
+  webSocket: true # will also enable WS support through the default HTTP interface/port
+  mTLS: false
+  requireAuthentication: true
+```
+
+Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`. mTLS is also supported by enabling it in the configuration and using the certificate authority from the TLS section of the configuration. See the [configuration documentation for more information](../deployments/configuration).
+
+#### Capabilities
+
+Harper's MQTT capabilities include support for MQTT versions v3.1 and v5, with standard publish and subscription capabilities, multi-level topics, QoS 0 and 1 levels, and durable (non-clean) sessions. Harper supports QoS 2 interaction, but doesn't guarantee exactly-once delivery (although any guarantee of exactly-once delivery over an unstable network is a fictional aspiration). Harper's MQTT support doesn't currently include last will, nor single-level wildcards (only multi-level wildcards).
+
+### Topics
+
+In MQTT, messages are published to, and subscribed from, topics. In Harper, topics are aligned with resource endpoint paths in exactly the same way as the REST endpoints. If you define a table or resource in your schema, with a path/endpoint of "my-resource", that means that this can be addressed as a topic just like a URL path.
So a topic of "my-resource/some-id" would correspond to the record in the my-resource table (or custom resource) with a record id of "some-id".
+
+This means that you can subscribe to "my-resource/some-id", and making this subscription means you will receive notification messages for any updates to this record. If this record is modified or deleted, a message will be sent to listeners of this subscription.
+
+The current value of this record is also treated as the "retained" message for this topic. When you subscribe to "my-resource/some-id", you will immediately receive the record for this id, through a "publish" command from the server, as the initial "retained" message that is first delivered. This provides a simple and effective way to get the current state of a record and future updates to that record, without having to worry about timing issues of aligning a retrieval and a subscription separately.
+
+Similarly, publishing a message to a "topic" also interacts with the database. Publishing a message with the "retain" flag enabled is interpreted as an update or put to that record. The published message will replace the current record with the contents of the published message.
+
+If a message is published without a `retain` flag, the message will not alter the record at all, but will still be published to any subscribers to that record.
+
+Harper supports QoS 0 and 1 for publishing and subscribing.
+
+Harper supports multi-level topics, both for subscribing and publishing. Harper also supports multi-level wildcards, so you can subscribe to `my-resource/#` to receive notifications for `my-resource/some-id` as well as `my-resource/nested/id`, or you can subscribe to `my-resource/nested/#` and receive the latter, but not the former, topic messages. Harper currently only supports trailing multi-level wildcards (no single-level wildcards with `+`).
+
+#### Events
+JavaScript components can also listen for MQTT events. This is available on the `server.mqtt.events` object. For example, to set up a listener/callback for when MQTT clients connect and authorize, we can do:
+
+```javascript
+server.mqtt.events.on('connected', (session, socket) => {
+	console.log('client connected with id', session.clientId);
+});
+```
+The following MQTT events are available:
+* `connection` - When a client initially establishes a TCP or WS connection to the server
+* `connected` - When a client establishes an authorized MQTT connection
+* `auth-failed` - When a client fails to authenticate
+* `disconnected` - When a client disconnects from the server
+
+### Ordering
+
+Harper is designed to be a distributed database, and an intrinsic characteristic of distributed servers is that messages may take different amounts of time to traverse the network and may arrive in a different order, depending on server location and network topology. Harper is designed for distributed data with minimal latency, so messages are delivered to subscribers immediately when they arrive. Harper does not delay messages to coordinate confirmation or consensus among other nodes, which would significantly increase latency; messages are delivered as quickly as possible.
+
+As an example, let's consider message #1 being published to node A, which then sends the message to node B and node C, but the message takes a while to get there. Slightly later, while the first message is still in transit, message #2 is published to node B, which then replicates it to A and C, and because of network conditions, message #2 arrives at node C before message #1.
Because Harper prioritizes low latency, when node C receives message #2, it immediately publishes it to all of its local subscribers (it has no knowledge that message #1 is in transit).
+
+When message #1 is received by node C, what it does with this message depends on whether the message is a "retained" message (was published with a retain flag set to true, or was put/update/upsert/inserted into the database) or a non-retained message. In the case of a non-retained message, the message will be delivered to all local subscribers (even though it had been published earlier), thereby prioritizing the delivery of every message. On the other hand, a retained message will not deliver the earlier out-of-order message to clients, and Harper will keep the message with the latest timestamp as the "winning" record state (and it will be the retained message for any subsequent subscriptions). Retained messages maintain (eventual) consistency across the entire cluster of servers: all nodes will converge on the same message as being the latest, retained message (#2 in this case).
+
+Non-retained messages are generally a good choice for applications like chat, where every message needs to be delivered even if some might arrive out of order (the order may not be consistent across all servers). Retained messages can be thought of as "superseding" messages, and are a good fit for applications like instrument measurements (temperature readings, for example), where the priority is to provide the _latest_ temperature, older readings are not important to publish once a new reading arrives, and consistency of the most recent record (across the network) is important.
+
+### WebSockets
+
+WebSockets are supported through the REST interface and go through the `connect(incomingMessages)` method on resources. By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection to `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' with the resource id of 341 and connect to it. On the web platform this could be:
+
+```javascript
+let ws = new WebSocket('wss://server/my-resource/341');
+ws.onmessage = (event) => {
+	// received a notification from the server
+	let data = JSON.parse(event.data);
+};
+```
+
+By default, the resources will make a subscription to that resource, monitoring any changes to the records or messages published to it, and will return events on the WebSockets connection. You can also override `connect(incomingMessages)` with your own handler. The `connect` method simply needs to return an iterable (asynchronous iterable) that represents the stream of messages to be sent to the client. One easy way to create an iterable stream is to define the `connect` method as a generator and `yield` messages as they become available. For example, a simple WebSockets echo server for a resource could be written:
+
+```javascript
+export class Echo extends Resource {
+	async *connect(incomingMessages) {
+		for await (let message of incomingMessages) { // wait for each incoming message from the client
+			// and send the message back to the client
+			yield message;
+		}
+	}
+}
+```
+
+You can also call the default `connect` and it will provide a convenient streaming iterable with events for the outgoing messages, with a `send` method that you can call to send messages on the iterable, and a `close` event for determining when the connection is closed.
The incoming messages iterable is also an event emitter, and you can listen for `data` events to get the incoming messages using event style:
+
+```javascript
+export class Example extends Resource {
+	connect(incomingMessages) {
+		let outgoingMessages = super.connect();
+		let timer = setInterval(() => {
+			outgoingMessages.send({ greeting: 'hi again!' });
+		}, 1000); // send a message once a second
+		incomingMessages.on('data', (message) => {
+			// another way of echo-ing the data back to the client
+			outgoingMessages.send(message);
+		});
+		outgoingMessages.on('close', () => {
+			// make sure we end the timer once the connection is closed
+			clearInterval(timer);
+		});
+		return outgoingMessages;
+	}
+}
+```
+
+### Server Sent Events
+
+Server Sent Events (SSE) are also supported through the REST server interface, and provide a simple and efficient mechanism for web-based applications to receive real-time updates. For consistency of push delivery, SSE connections go through the `connect()` method on resources, much like WebSockets. The primary difference is that `connect` is called without any `incomingMessages` argument, since SSE is a one-directional transport mechanism. This can be used much like WebSockets: specifying a resource URL path will connect to that resource, and by default provides a stream of messages for changes to, and messages for, that resource. For example, you can connect to receive notifications in a browser for a resource like:
+
+```javascript
+let eventSource = new EventSource('https://server/my-resource/341', { withCredentials: true });
+eventSource.onmessage = (event) => {
+	// received a notification from the server
+	let data = JSON.parse(event.data);
+};
+```
+
+### MQTT Feature Support Matrix
+
+| Feature | Support |
+|---------|---------|
+| Connections, protocol negotiation, and acknowledgement with v3.1.1 | :heavy_check_mark: |
+| Connections, protocol negotiation, and acknowledgement with v5 | :heavy_check_mark: |
+| Secure MQTTS | :heavy_check_mark: |
+| MQTTS over WebSockets | :heavy_check_mark: |
+| MQTT authentication via user/pass | :heavy_check_mark: |
+| MQTT authentication via mTLS | :heavy_check_mark: |
+| Publish | :heavy_check_mark: |
+| Subscribe | :heavy_check_mark: |
+| Multi-level wildcard | :heavy_check_mark: |
+| Single-level wildcard | :heavy_check_mark: |
+| QoS 0 | :heavy_check_mark: |
+| QoS 1 | :heavy_check_mark: |
+| QoS 2 | Not fully supported; can perform the conversation but does not guarantee exactly-once delivery |
+| Keep-Alive monitoring | :heavy_check_mark: |
+| Clean session | :heavy_check_mark: |
+| Durable session | :heavy_check_mark: |
+| Distributed durable session | |
+| Will | :heavy_check_mark: |
+| MQTT V5 User properties | |
+| MQTT V5 Will properties | |
+| MQTT V5 Connection properties | |
+| MQTT V5 Connection acknowledgement properties | |
+| MQTT V5 Publish properties | |
+| MQTT V5 Subscribe properties retain handling | :heavy_check_mark: |
+| MQTT V5 Subscribe properties | |
+| MQTT V5 Ack properties | |
+| MQTT V5 AUTH command | |
+| MQTT V5 Shared Subscriptions | |
diff --git a/site/versioned_docs/version-4.5/developers/replication/index.md b/site/versioned_docs/version-4.5/developers/replication/index.md
new file mode 100644
index 00000000..1d16ee62
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/replication/index.md
@@ -0,0 +1,276 @@
+---
+title: Replication/Clustering
+---
+
+# Replication/Clustering
+
+Harper's replication system is designed to make distributed data replication fast and reliable across multiple nodes. This means you can easily build a distributed database that ensures high availability, disaster recovery, and data localization. The best part? It's simple to set up, configure, and manage. You can easily add or remove nodes, choose which data to replicate, and monitor the system's health without jumping through hoops.
+
+### Replication Overview
+
+Harper replication uses a peer-to-peer model where every node in your cluster can send and subscribe to data. Each node connects through WebSockets, allowing data to flow seamlessly in both directions. By default, Harper takes care of managing these connections and subscriptions, so you don't have to worry about data consistency. The system is designed to maintain secure, reliable connections between nodes, ensuring that your data is always safe.
+
+### Replication Configuration
+
+To connect your nodes, you need to provide hostnames or URLs for the nodes to connect to each other. This can be done via configuration or through operations. To configure replication, you can specify connection information in the `replication` section of the [harperdb-config.yaml](../../deployments/configuration). Here, you can specify the hostname of the current node, and routes to connect to other nodes, for example:
+
+```yaml
+replication:
+  hostname: server-one
+  routes:
+    - server-two
+    - server-three
+```
+
+In this example, the current node is `server-one`, and it will connect to `server-two` and `server-three`. Routes to other nodes can also be configured with URLs or ports:
+
+```yaml
+replication:
+  hostname: server-one
+  routes:
+    - wss://server-two:9933 # URL based route
+    - hostname: server-three # define a hostname and port
+      port: 9933
+```
+
+You can also use the [operations API](../operations-api/clustering) to dynamically add and remove nodes from the cluster. This is useful for adding new nodes to a running cluster or removing nodes that are no longer needed. For example (note this is the basic form; you would also need to provide the necessary credentials for the operation, as described in the section on securing connections below):
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two"
+}
+```
+
+These operations will also dynamically generate certificates as needed, if there are no existing signed certificates or if the existing certificates are not valid for the new node.
+
+Harper will also automatically replicate node information to other nodes in a cluster ([gossip-style discovery](https://highscalability.com/gossip-protocol-explained/)). This means that you only need to connect to one node in an existing cluster, and Harper will automatically detect and connect to other nodes in the cluster (bidirectionally).
+
+By default, Harper will replicate all the data in all the databases. You can configure which databases are replicated, and then override this behavior on a per-table basis. For example, you can indicate which databases should be replicated by default, here indicating that you want to replicate the `data` and `system` databases:
+
+```yaml
+replication:
+  databases:
+    - data
+    - system
+```
+
+By default, all tables within a replicated database will be replicated. Transactions are replicated atomically, which may involve data across multiple tables.
However, you can also configure replication for individual tables, disabling and excluding replication for specific tables in a database by setting `replicate` to `false` in the table definition:
+
+```graphql
+type LocalTableForNode @table(replicate: false) {
+  id: ID!
+  name: String!
+}
+```
+
+You can also control which nodes data is replicated to, and how many nodes data is replicated to. By default, Harper will replicate data to all nodes in the cluster, but you can control where data is replicated to with the [sharding configuration and APIs](./sharding).
+
+By default, replication connects to the secure port 9933. You can configure the replication port in the `replication` section:
+
+```yaml
+replication:
+  securePort: 9933
+```
+
+### Securing Connections
+
+Harper supports the highest levels of security through public key infrastructure based security and authorization. Depending on your security configuration, you can configure Harper in several different ways to build a connected cluster.
+
+#### Provide your own certificates
+
+If you want to secure your Harper connections with your own signed certificates, you can easily do so. Whether you have certificates from a public authority (like Let's Encrypt or Digicert) or a corporate certificate authority, you can use them to authenticate nodes securely. You can then allow nodes to authorize each other by checking the certificate against the standard list of root certificate authorities by enabling the `enableRootCAs` option in the config:
+
+```yaml
+replication:
+  enableRootCAs: true
+```
+
+And then just make sure the certificate's common name (CN) matches the node's hostname.
+
+#### Setting Up Custom Certificates
+
+There are two ways to configure Harper with your own certificates:
+
+1. Use the `add_certificate` operation to upload them.
+1. Or, specify the certificate paths directly in the `replication` section of the `harperdb-config.yaml` file.
+
+If your certificate is signed by a trusted public authority, just provide the path to the certificate and private key. If you're using self-signed certificates or a private certificate authority, you'll also need to provide the certificate authority (CA) details to complete the setup.
+
+Example configuration:
+
+```yaml
+tls:
+  certificate: /path/to/certificate.pem
+  certificateAuthority: /path/to/ca.pem
+  privateKey: /path/to/privateKey.pem
+```
+
+With this in place, Harper will load the provided certificates into the certificate table and use these to secure and authenticate connections between nodes.
+
+You have the option to skip providing a specific certificate authority (CA) and instead verify your certificate against the root certificates included in the bundled Mozilla CA store. This bundled CA store, provided by Node.js, is a snapshot of Mozilla's CA certificates that is fixed at the time of each Node.js release.
+
+To enable the root certificates, set `replication.enableRootCAs` to `true` in the `harperdb-config.yaml` file:
+
+```yaml
+replication:
+  enableRootCAs: true
+```
+
+#### Cross-generated certificates
+
+Harper can also generate its own certificates for secure connections. This is useful for setting up secure connections between nodes when no existing certificates are available, and can be used in development, testing, or production environments. Certificates will be automatically requested and signed between nodes to support a form of distributed certificate generation and signing.
To establish secure connections between nodes using cross-generated certificates, you simply use the [`add_node` operation](../operations-api/clustering) over SSL, and specify the temporary authentication credentials to use for connecting and authorizing the certificate generation and signing.
+
+Example configuration:
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "verify_tls": false,
+  "authorization": {
+    "username": "admin",
+    "password": "password"
+  }
+}
+```
+
+When you connect to another node (e.g., `server-two`), Harper uses secure WebSockets and the provided credentials to establish the connection.
+
+If you're working with a fresh install, you'll need to set `verify_tls` to `false` temporarily, so the self-signed certificate is accepted. Once the connection is made, Harper will automatically handle the certificate signing process:
+
+* It creates a certificate signing request (CSR), sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA).
+* The signed certificate is stored for future connections between the nodes, ensuring secure communication.
+
+**Important:** Your credentials are not stored; they are discarded immediately after use.
+
+You can also provide credentials in HTTP Authorization format (Basic auth, Token auth, or JWT). This is helpful for handling authentication with the required permissions to generate and sign certificates.
+
+Additionally, you can use `set_node` as an alias for the `add_node` operation if you prefer.
+
+#### Revoking Certificates
+
+Certificates used in replication can be revoked by using the certificate serial number and either the `revoked_certificates` attribute in the `hdb_nodes` system table or the route config in `harperdb-config.yaml`.
+
+To utilize the `revoked_certificates` attribute in the `hdb_nodes` table, you can use the `add_node` or `update_node` operation to add the certificate serial number to the `revoked_certificates` array. For example:
+
+```json
+{
+  "operation": "update_node",
+  "hostname": "server-two",
+  "revoked_certificates": ["1769F7D6A"]
+}
+```
+
+To utilize the replication route config in `harperdb-config.yaml`, you can add the certificate serial number to the `revokedCertificates` array. For example:
+
+```yaml
+replication:
+  routes:
+    - hostname: server-three
+      port: 9930
+      revokedCertificates:
+        - 1769F7D6A
+        - QA69C7E2S
+```
+
+#### Removing Nodes
+
+Nodes can be removed from the cluster using the [`remove_node` operation](../operations-api/clustering). This will remove the node from the cluster, and stop replication to and from the node. For example:
+
+```json
+{
+  "operation": "remove_node",
+  "hostname": "server-two"
+}
+```
+
+#### Insecure Connection IP-based Authentication
+
+You can completely disable secure connections and use IP addresses to authenticate nodes with each other. This can be useful for development and testing, or within a secure private network, but should never be used in production with publicly accessible servers. To disable secure connections, configure replication on an insecure port, either by [configuring the operations API](../../deployments/configuration) to run on an insecure port or by configuring replication itself to run on an insecure port.
And then set up IP-based routes to connect to other nodes:
+
+```yaml
+replication:
+  port: 9933
+  routes:
+    - 127.0.0.2
+    - 127.0.0.3
+```
+
+Note that in this example, we are using loopback addresses, which can be a convenient way to run multiple nodes on a single machine for testing and development.
+
+#### Explicit Subscriptions
+
+By default, Harper automatically handles connections and subscriptions between nodes, ensuring data consistency across your cluster. It even re-routes data to manage node failures. But if you want more control, you can manage these connections manually by explicitly subscribing to nodes. This is useful for advanced configurations, testing, or debugging.
+
+#### Important Notes on Explicit Subscriptions
+
+If you choose to manage subscriptions manually, Harper will no longer handle data consistency for you. This means there's no guarantee that all nodes will have consistent data if subscriptions don't fully replicate in all directions. If a node goes down, it's possible that some data wasn't replicated before the failure.
+
+#### How to Subscribe to Nodes
+
+To explicitly subscribe to a node, you can use operations like `add_node` and define the subscriptions. For example, you can configure a node (e.g., `server-two`) to publish transactions on a specific table (e.g., `dev.my-table`) without receiving data from that node.
+
+Example configuration:
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "subscriptions": [{
+    "database": "dev",
+    "table": "my-table",
+    "publish": true,
+    "subscribe": false
+  }]
+}
+```
+
+To update an explicit subscription, you can use the [`update_node` operation](../operations-api/clustering).
+
+Here we are updating the subscription to also receive transactions on the `dev.my-table` table from the `server-two` node:
+
+```json
+{
+  "operation": "update_node",
+  "hostname": "server-two",
+  "subscriptions": [{
+    "database": "dev",
+    "table": "my-table",
+    "publish": true,
+    "subscribe": true
+  }]
+}
+```
+
+#### Monitoring Replication
+
+You can monitor the status of replication through the operations API, using the [`cluster_status` operation](../operations-api/clustering). For example:
+
+```json
+{
+  "operation": "cluster_status"
+}
+```
+
+#### Database Initial Synchronization and Resynchronization
+
+When a new node is added to the cluster, if its database has not previously been synced, it will initially download the database from the first node it connects to. This will copy every record from the source database to the new node. Once the initial synchronization is complete, the new node will enter replication mode and receive records from each node as they are created, updated, or deleted. If a node goes down and comes back up, it will also resynchronize with the other nodes in the cluster, to ensure that it has the most up-to-date data.
+
+You may also specify a `start_time` in the `add_node` operation to indicate that when a database connects, it should not download the entire database, but only data since the given starting time.
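+
+For example, a minimal sketch of such a request (the ISO 8601 timestamp format here is an assumption; check the `add_node` reference for the exact format accepted):
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "start_time": "2024-01-05T00:00:00.000Z"
+}
+```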
+
+**Advanced Configuration**
+
+You can also check the configuration of the replication system, including the currently known nodes and certificates, by querying the `hdb_nodes` and `hdb_certificate` tables:
+
+```json
+{
+  "operation": "search_by_value",
+  "database": "system",
+  "table": "hdb_nodes",
+  "search_attribute": "name",
+  "search_value": "*"
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/replication/sharding.md b/site/versioned_docs/version-4.5/developers/replication/sharding.md
new file mode 100644
index 00000000..c9d747ff
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/replication/sharding.md
@@ -0,0 +1,135 @@
+---
+title: Sharding
+---
+
+Harper's replication system supports various levels of replication or sharding. Harper can be configured to replicate different data to different subsets of nodes. This can be used to facilitate horizontal scalability of storage and write performance, while maintaining optimal data locality and data consistency. When sharding is configured, Harper will replicate data to only a subset of nodes, based on the sharding configuration, and can then retrieve data from the appropriate nodes as needed to fulfill requests for data.
+
+There are two main ways to set up sharding in Harper. The first approach is to use dynamic sharding, where the location or residency of records is determined dynamically based on where the record was written and on the record's data, and records can be dynamically relocated based on where they are accessed. This residency information can be specific to each record, and can vary based on the computed residency and where the data is written and accessed.
+
+The second approach is to define specific shards, where each node is assigned to a specific shard, and each record is replicated to the nodes in that shard based on the primary key, regardless of where the data was written or accessed, or its content. This approach is more static, but can be more efficient for certain use cases, and means that the location of data can always be predictably determined based on the primary key.
+
+## Configuration For Dynamic Sharding
+By default, Harper will replicate all data to all nodes. However, replication can easily be configured for "sharding", or storing different data in different locations or nodes. The simplest way to configure sharding and limit replication to improve performance and efficiency is to configure a replication-to count. This will limit the number of nodes that data is replicated to. For example, to specify that writes should replicate to 2 other nodes besides the node that first stored the data, you can set `replicateTo` to 2 in the `replication` section of the `harperdb-config.yaml` file:
+```yaml
+replication:
+  replicateTo: 2
+```
+This will ensure that data is replicated to two other nodes, so that each record will be stored on three nodes in total.
+
+With a sharding configuration (or a customization below) in place, requests for records that don't reside on the server handling the request will automatically be forwarded to the appropriate node. This is done transparently, so the client does not need to know where the data is stored.
+
+## Replication Control with Headers
+With the REST interface, replication levels and destinations can also be specified with the `X-Replicate-To` header. This can be used to indicate the number of additional nodes that data should be replicated to, or to specify the nodes that data should be replicated to.
The `X-Replicate-To` header can be used with the `POST` and `PUT` methods. This header can also specify whether the response should wait for confirmation from other nodes, and how many, with the `confirm` parameter. For example, to specify that data should be replicated to two other nodes, and that the response should be returned once confirmation is received from one other node, you can use the following header:
+```http
+PUT /MyTable/3
+X-Replicate-To: 2;confirm=1
+
+...
+```
+
+You can also explicitly specify destination nodes by providing a comma-separated list of node hostnames. For example, to specify that data should be replicated to nodes `node1` and `node2`, you can use the following header:
+```http
+PUT /MyTable/3
+X-Replicate-To: node1,node2
+```
+(This can also be used with the `confirm` parameter.)
+
+## Replication Control with Operations
+Likewise, you can specify the `replicateTo` and `replicatedConfirmation` parameters in the operation object when using the Harper API. For example, to specify that data should be replicated to two other nodes, and that the response should be returned once confirmation is received from one other node, you can use the following operation object:
+```json
+{
+  "operation": "update",
+  "schema": "dev",
+  "table": "MyTable",
+  "hashValues": [3],
+  "record": {
+    "name": "John Doe"
+  },
+  "replicateTo": 2,
+  "replicatedConfirmation": 1
+}
+```
+or you can specify nodes:
+```json
+...,
+  "replicateTo": ["node-1", "node-2"]
+...
+```
+## Programmatic Replication Control
+Additionally, you can specify the `replicateTo` and `replicatedConfirmation` parameters programmatically in the context of a resource. For example, you can define a put method:
+```javascript
+class MyTable extends tables.MyTable {
+  put(record) {
+    const context = this.getContext();
+    context.replicateTo = 2; // or an array of node names
+    context.replicatedConfirmation = 1;
+    return super.put(record);
+  }
+}
+```
+
+## Configuration for Static Sharding
+Alternatively, you can configure static sharding, where each node is assigned to a specific shard, and each record is replicated to the nodes in that shard based on the primary key. A shard is identified by a number. To configure the shard for each node, you can specify the shard number in the `shard` property of the `replication` section in the configuration:
+```yaml
+replication:
+  shard: 1
+```
+Alternatively, you can configure the `shard` under the `replication` `routes`. This allows you to assign a specific shard id based on the routing configuration.
+```yaml
+replication:
+  routes:
+    - hostname: node1
+      shard: 1
+    - hostname: node2
+      shard: 2
+```
+Or you can specify a `shard` number by including that property in an `add_node` operation or `set_node` operation, to dynamically assign a node to a shard.
+
+You can then return a shard number from the `setResidency` or `setResidencyById` functions described below.
+
+## Custom Sharding
+You can also define a custom sharding strategy by specifying a function to compute the "residency", or location, of where records should be stored and reside. To do this, use the `setResidency` method, providing a function that will determine the residency of each record. The function you provide will be called with the record entry, and should return an array of nodes that the record should be replicated to (using their hostnames). For example, to shard records based on the value of the `id` field, you can use the following code:
+```javascript
+MyTable.setResidency((record) => {
+  return record.id % 2 === 0 ? ['node1'] : ['node2'];
+});
+```
+With this approach, the record metadata, which includes the residency information and any indexed properties, will be replicated to all nodes, but the full record will only be replicated to the nodes specified by the residency function.
+
+The `setResidency` function can alternatively return a shard number, which will replicate the data to all the nodes in that shard:
+```javascript
+MyTable.setResidency((record) => {
+  return record.id % 2 === 0 ? 1 : 2;
+});
+```
+
+### Custom Sharding By Primary Key
+Alternatively, you can define a custom sharding strategy based on the primary key alone. This allows records to be retrieved without needing access to the record data or metadata. With this approach, data will only be replicated to the nodes specified by the residency function (the record metadata doesn't need to be replicated to all nodes). To do this, you can use the `setResidencyById` method, providing a function that will determine the residency or shard of each record based on the primary key. The function you provide will be called with the primary key, and should return a `shard` number or an array of nodes that the record should be replicated to (using their hostnames). For example, to shard records based on the value of the primary key, you can use the following code:
+
+```javascript
+MyTable.setResidencyById((id) => {
+  return id % 2 === 0 ? 1 : 2; // return shard number
+});
+```
+or
+```javascript
+MyTable.setResidencyById((id) => {
+  return id % 2 === 0 ? ['node1'] : ['node2']; // return array of node hostnames
+});
+```
+
+### Disabling Cross-Node Access
+Normally sharding allows data to be stored on specific nodes, while still allowing access to the data from any node. However, you can also disable cross-node access so that data is only returned if it is stored on the node where it is accessed. To do this, you can set the `replicateFrom` property of the operation to `false`:
+```json
+{
+  "operation": "search_by_id",
+  "table": "MyTable",
+  "ids": [3],
+  "replicateFrom": false
+}
+```
+Or use a header with the REST API:
+```http
+GET /MyTable/3
+X-Replicate-From: none
+```
diff --git a/site/versioned_docs/version-4.5/developers/rest.md b/site/versioned_docs/version-4.5/developers/rest.md
new file mode 100644
index 00000000..753d1fc4
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/rest.md
@@ -0,0 +1,404 @@
+---
+title: REST
+---
+
+# REST
+
+Harper provides a powerful, efficient, and standards-compliant HTTP REST interface for interacting with tables and other resources. The REST interface is the recommended interface for data access, querying, and manipulation (for HTTP interactions), providing the best performance and HTTP interoperability with different clients.
+
+Resources, including tables, can be configured as RESTful endpoints. Make sure you review the [application introduction](./applications/) and [defining schemas](./applications/defining-schemas) to properly define your schemas and select which tables are exported and available through the REST interface, as tables are not exported by default. The name of the [exported](./applications/defining-schemas#export) resource defines the basis of the endpoint path available at the application HTTP server port [configured here](../deployments/configuration#http) (the default being `9926`). From there, a record id or query can be appended. Following uniform interface principles, HTTP methods define different actions with resources.
For each method, the sections below describe the default action.
+
+The default path structure provides access to resources at several levels:
+
+* `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table).
+* `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records.
+* `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records.
+* `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix.
+* `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments.
+
+### GET
+
+GET requests can be used to retrieve individual records or perform searches. This is handled by the Resource method `get()` (and can be overridden).
+
+#### `GET /my-resource/record-id`
+
+This can be used to retrieve a record by its primary key. The response will include the record as the body.
+
+**Caching/Conditional Requests**
+
+A `GET` response for a record will include an encoded version (a timestamp of the last modification) of the record in the `ETag` response header (or of any accessed record when used in a custom get method). On subsequent requests, a client (that has a cached copy) may include an `If-None-Match` request header with this tag. If the record has not been updated since then, the response will have a 304 status and no body. This facilitates significant performance gains since the response data doesn't need to be serialized and transferred over the network.
+
+#### `GET /my-resource/?property=value`
+
+This can be used to search for records by the specified property name and value. See the querying section for more information.
+
+#### `GET /my-resource/record-id.property`
+
+This can be used to retrieve the specified property of the specified record. Note that this will only work for properties that are declared in the schema.
+
+### PUT
+
+This can be used to create or update a record with the provided object/data (similar to an "upsert") with a specified key. This is handled by the Resource method `put(record)`.
+
+#### `PUT /my-resource/record-id`
+
+This will create or update the record at the URL path that maps to the record's primary key. The record will be replaced with the contents of the data in the request body. The new record will exactly match the data that was sent (this will remove any properties that were present in the previous record and not included in the body). Future GETs will return the exact data that was provided by PUT (what you PUT is what you GET). For example:
+
+```http
+PUT /MyTable/123
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create or replace the record with a primary key of "123" with the object defined by the JSON in the body. This is handled by the Resource method `put()`.
+
+### DELETE
+
+This can be used to delete a record or records.
+
+#### `DELETE /my-resource/record-id`
+
+This will delete a record with the given primary key. This is handled by the Resource's `delete` method. For example:
+
+```http
+DELETE /MyTable/123
+```
+
+This will delete the record with the primary key of "123".
+
+#### `DELETE /my-resource/?property=value`
+
+This will delete all the records that match the provided query.
+
+### POST
+
+Generally the POST method can be used for custom actions since POST has the broadest semantics. For tables that are exposed as endpoints, it can also be used to create new records.
+
+#### `POST /my-resource/`
+
+This is handled by the Resource method `post(data)`, which is a good method to extend to make various other types of modifications. Also, with a table you can create a new record without specifying a primary key, for example:
+
+```http
+POST /MyTable/
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create a new record, auto-assigning a primary key, which will be returned in the `Location` header.
+
+### Querying through URL query parameters
+
+URL query parameters provide a powerful language for specifying database queries in Harper. They can be used to search by a single attribute name and value, finding all records that have the specified value for the given property/attribute. It is important to note that an attribute must be indexed to search on it. For example:
+
+```http
+GET /my-resource/?property=value
+```
+
+We can specify multiple properties that must match:
+
+```http
+GET /my-resource/?property=value&property2=another-value
+```
+
+Note that only one of the attributes needs to be indexed for this query to execute.
+
+We can also specify different comparators such as less-than and greater-than queries using [FIQL](https://datatracker.ietf.org/doc/html/draft-nottingham-atompub-fiql-00) syntax. If we want to specify records with an `age` value greater than 20:
+
+```http
+GET /my-resource/?age=gt=20
+```
+
+Or less than or equal to 20:
+
+```http
+GET /my-resource/?age=le=20
+```
+
+The comparison operators include the standard FIQL operators `lt` (less than), `le` (less than or equal), `gt` (greater than), `ge` (greater than or equal), and `ne` (not equal). These comparison operators can also be combined with other query parameters with `&`. For example, if we wanted products with a category of software and a price between 100 and 200, we could write:
+
+```http
+GET /Product/?category=software&price=gt=100&price=lt=200
+```
+
+Comparison operators can also be used on Date fields; however, we have to ensure that the date format is properly escaped. For example, if we are looking for a listing date greater than `2017-03-08T09:30:00.000Z`, we must escape the colons as `%3A`:
+
+```http
+GET /Product/?listDate=gt=2017-03-08T09%3A30%3A00.000Z
+```
+
+You can also search for attributes that start with a specific string, by using the `==` comparator and appending a `*` to the attribute value:
+
+```http
+GET /Product/?name==Keyboard*
+```
+
+**Chained Conditions**
+
+You can also specify that a range condition must be met for a single attribute value by chaining conditions. This is done by omitting the name in the name-value pair. For example, to find products with a price between 100 and 200, you could write:
+
+```http
+GET /Product/?price=gt=100<=200
+```
+
+Chaining can be used to combine `gt` or `ge` with `lt` or `le` to specify a range of values. Currently, no other types of chaining are supported.
+
+Note that some HTTP clients may be overly aggressive in encoding query parameters, and you may need to disable extra encoding of query parameters to ensure operators are passed through without manipulation.
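+
+For instance, when building such a query string in JavaScript, one minimal sketch (a hypothetical helper, not part of Harper's API) is to encode only the values so the FIQL operators survive intact:
+
+```javascript
+// Encode only the user-supplied values, not the FIQL operators,
+// so `=gt=`/`=lt=` reach the server unescaped.
+const conditions = [
+  ['price', '=gt=', '100'],
+  ['price', '=lt=', '200'],
+];
+const query = conditions
+  .map(([name, op, value]) => `${name}${op}${encodeURIComponent(value)}`)
+  .join('&');
+const url = `/Product/?${query}`; // /Product/?price=gt=100&price=lt=200
+```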
+
+Here is a full list of the supported FIQL-style operators/comparators:
+
+* `==`: equal
+* `=lt=`: less than
+* `=le=`: less than or equal
+* `=gt=`: greater than
+* `=ge=`: greater than or equal
+* `=ne=`, `!=`: not equal
+* `=ct=`: contains the value (for strings)
+* `=sw=`, `==*`: starts with the value (for strings)
+* `=ew=`: ends with the value (for strings)
+* `=`, `===`: strict equality (no type conversion)
+* `!==`: strict inequality (no type conversion)
+
+#### Unions
+
+Conditions can also be applied with `OR` logic, returning the union of records that match either condition. This can be specified by using the `|` operator instead of `&`. For example, to return any product with a rating of `5` _or_ a `featured` attribute that is `true`, we could write:
+
+```http
+GET /Product/?rating=5|featured=true
+```
+
+#### Grouping of Operators
+
+Multiple conditions with different operators can be combined with grouping of conditions to indicate the order of operation. Grouping conditions can be done with parentheses, following the standard grouping conventions used in query and mathematical expressions. For example, a query to find products with a rating of 5 OR a price between 100 and 200 could be written:
+
+```http
+GET /Product/?rating=5|(price=gt=100&price=lt=200)
+```
+
+Grouping conditions can also be done with square brackets, which function the same as parentheses for grouping conditions. The advantage of using square brackets is that you can include user-provided values that might have parentheses in them, and use standard URI component encoding functionality, which will safely escape/encode square brackets, but not parentheses. For example, if we were constructing a query for products with a rating of 5 and matching one of a set of user-provided tags, a query could be built like:
+
+```http
+GET /Product/?rating=5&[tag=fast|tag=scalable|tag=efficient]
+```
+
+And the tags could be safely generated from user inputs in a tag array like:
+
+```javascript
+let url = `/Product/?rating=5&[${tags.map(encodeURIComponent).join('|')}]`;
+```
+
+More complex queries can be created by further nesting groups:
+
+```http
+GET /Product/?price=lt=100|[rating=5&[tag=fast|tag=scalable|tag=efficient]&inStock=true]
+```
+
+### Query Calls
+
+Harper has several special query functions that use "call" syntax. These can be included in the query string as their own query entries (separated from other query conditions with an `&`). These include:
+
+#### `select(properties)`
+
+This function allows you to specify which properties should be included in the responses. This takes several forms:
+
+* `?select(property)`: This will return the values of the specified property directly in the response (they will not be put in an object).
+* `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties.
+* `?select([property1,property2,...])`: This returns the records as arrays of the property values of the specified properties.
+* `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property.
+* `?select(property{subProperty1,subProperty2{subSubProperty,..}},...)`: This can be used to specify which sub-properties should be included in nested objects and joined/referenced records.
+
+To get a list of product names with a category of software:
+
+```http
+GET /Product/?category=software&select(name)
+```
+
+#### `limit(start,end)` or `limit(end)`
+
+This function specifies a limit on the number of records returned, optionally providing a starting offset.
+
+For example, to find the first twenty records with a `rating` greater than 3 and `inStock` equal to true, only returning the `rating` and `name` properties, you could use:
+
+```http
+GET /Product/?rating=gt=3&inStock=true&select(rating,name)&limit(20)
+```
+
+#### `sort(property)`, `sort(+property,-property,...)`
+
+This function allows you to indicate the sort order for the returned results. The argument for `sort()` is one or more properties that should be used to sort. If the property is prefixed with '+' or has no prefix, the sort will be performed in ascending order by the indicated attribute/property. If the property is prefixed with '-', it will be sorted in descending order. If multiple properties are specified, the sort will be performed on the first property, and for records with the same value for that property, the next property will be used to break the tie and sort results. This tie-breaking will continue through any provided properties.
+
+For example, to sort by product name (in ascending order):
+
+```http
+GET /Product/?rating=gt=3&sort(+name)
+```
+
+To sort by rating in ascending order, then by price in descending order for products with the same rating:
+
+```http
+GET /Product/?sort(+rating,-price)
+```
+
+## Relationships
+
+Harper supports relationships in its data models, allowing tables to define a relationship with data from other tables (or even themselves) through foreign keys. These relationships can be one-to-many, many-to-one, or many-to-many (even with ordered relationships). These relationships are defined in the schema, and can then easily be queried through chained attributes that act as "join" queries, allowing related attributes to be referenced in conditions and selected for returned results.
+
+### Chained Attributes and Joins
+
+To support relationships and hierarchical data structures, in addition to querying on top-level attributes, you can also query on chained attributes. Most importantly, this provides Harper's "join" functionality, allowing related tables to be queried and joined in the results. Chained properties are specified by using dot syntax. In order to effectively leverage join functionality, you need to define a relationship in your schema:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  name: String
+  brandId: ID @indexed
+  brand: Brand @relationship(from: "brandId")
+}
+type Brand @table @export {
+  id: ID @primaryKey
+  name: String
+  products: [Product] @relationship(to: "brandId")
+}
+```
+
+And then you could query a product by brand name:
+
+```http
+GET /Product/?brand.name=Microsoft
+```
+
+This will query for products for which the `brandId` references a `Brand` record with a `name` of `"Microsoft"`.
+
+The `brand` attribute in `Product` is a "computed" attribute from the foreign key (`brandId`), for the many-to-one relationship to the `Brand`. In the schema above, we also defined the reverse one-to-many relationship from a `Brand` to a `Product`, and we could likewise query that:
+
+```http
+GET /Brand/?products.name=Keyboard
+```
+
+This would return any `Brand` with at least one product with a name of `"Keyboard"`. Note that both of these queries are effectively acting as an "INNER JOIN".
+
+#### Chained/Nested Select
+
+Computed relationship attributes are not included by default in query results. However, we can include them by specifying them in a select:
+
+```http
+GET /Product/?brand.name=Microsoft&select(name,brand)
+```
+
+We can also do a "nested" select and specify which sub-attributes to include. For example, if we only wanted to include the name property from the brand, we could do so:
+
+```http
+GET /Product/?brand.name=Microsoft&select(name,brand{name})
+```
+
+Or to specify multiple sub-attributes, we can comma-delimit them. Note that selects can "join" to another table without any constraint/filter on the related/joined table:
+
+```http
+GET /Product/?name=Keyboard&select(name,brand{name,id})
+```
+
+When selecting properties from a related table without any constraints on the related table, this effectively acts like a "LEFT JOIN" and will omit the `brand` property if `brandId` is `null` or references a non-existent brand.
+
+#### Many-to-many Relationships (Array of Foreign Keys)
+
+Many-to-many relationships are also supported, and can easily be created using an array of foreign key values, without requiring the traditional use of a junction table. This can be done by simply creating a relationship on an array-typed property that references a local array of foreign keys. For example, we could create a relationship to the resellers of a product (each product can have multiple resellers, and each reseller can carry multiple products):
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  name: String
+  resellerIds: [ID] @indexed
+  resellers: [Reseller] @relationship(from: "resellerIds")
+}
+type Reseller @table {
+  id: ID @primaryKey
+  name: String
+  ...
+}
+```
+
+The product record can then hold an array of the reseller ids. When the `resellers` property is accessed (either through code or through selects and conditions), the array of ids is resolved to an array of reseller records. We can also query through the `resellers` relationship just like the other relationships. For example, to query the products that are available through the "Cool Shop":
+
+```http
+GET /Product/?resellers.name=Cool Shop&select(id,name,resellers{name,id})
+```
+
+One of the benefits of using an array of foreign key values is that the array can be manipulated using standard array methods (in JavaScript), and its order dictates the order of the keys and therefore of the resulting records. For example, you may wish to define a specific order to the resellers and how they are listed (which comes first, which last):
+
+```http
+PUT /Product/123
+Content-Type: application/json
+
+{ "id": "123", "resellerIds": ["first-reseller-id", "second-reseller-id", "last-reseller-id"],
+...}
+```
+
+#### Type Conversion
+
+Query parameters are simply text, so there are several features for converting parameter values to properly typed values for performing correct searches. For the FIQL comparators, which include `==`, `!=`, `=gt=`, `=lt=`, `=ge=`, and `=le=`, the parser will perform type conversion according to the following rules:
+
+* `name==null`: Will convert the value to `null` for searching.
+* `name==123`: Will convert the value to a number _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`).
+* `name==true`: Will convert the value to a boolean _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`).
+* `name==number:123`: Will explicitly convert the value after "number:" to a number.
+* `name==boolean:true`: Will explicitly convert the value after "boolean:" to a boolean.
+* `name==string:some%20text`: Will explicitly keep the value after "string:" as a string (and perform URL component decoding).
+* `name==date:2024-01-05T20%3A07%3A27.955Z`: Will explicitly convert the value after "date:" to a Date object.
+
+If the attribute specifies a type (like `Float`) in the schema definition, the value will always be converted to the specified type before searching.
+
+For "strict" operators, which include `=`, `===`, and `!==`, no automatic type conversion is applied: the value is decoded as a string (with URL component decoding) unless the attribute specifies a type, in which case the value is converted to that type.
+
+#### Content Types and Negotiation
+
+HTTP defines a couple of headers for indicating the (preferred) content type of the request and response. The `Content-Type` request header can be used to specify the content type of the request body (for PUT, PATCH, and POST). The `Accept` request header indicates the preferred content type of the response. For general records with object structures, Harper supports the following content types:
+
+* `application/json` - Common format, easy to read, with great tooling support.
+* `application/cbor` - Recommended binary format for optimal encoding efficiency and performance.
+* `application/x-msgpack` - This is also an efficient format, but CBOR is preferable, as it has better streaming capabilities and faster time-to-first-byte.
+* `text/csv` - CSV lacks explicit typing and is not well suited for heterogeneous data structures, but it is good for moving data to and from a spreadsheet.
+
+CBOR is generally the most efficient and powerful encoding format, with the best performance, most compact encoding, and most expansive ability to encode different data types like Dates, Maps, and Sets. MessagePack is very similar and tends to have broader adoption. However, JSON can be easier to work with and may have better tooling. Also, if you are using compression for data transfer (gzip or brotli), JSON will often result in more compact compressed data due to character frequencies that better align with Huffman coding, making JSON a good choice for web applications that do not require specific data types beyond the standard JSON types.
+
+Requesting a specific content type can also be done in a URL by suffixing the path with the extension for the content type. If you want to retrieve a record in CSV format, you could request:
+
+```http
+GET /product/some-id.csv
+```
+
+Or you could request a query response in MessagePack:
+
+```http
+GET /product/.msgpack?category=software
+```
+
+However, it is generally not recommended that you use extensions in paths; it is best practice to use the `Accept` header to specify acceptable content types.
+
+#### Specific Content Objects
+
+You can specify other content types, and the data will be stored as a record or object that holds the type and contents of the data. For example, if you do:
+
+```http
+PUT /my-resource/33
+Content-Type: text/calendar
+
+BEGIN:VCALENDAR
+VERSION:2.0
+...
+```
+
+This would store a record equivalent to the JSON:
+
+```json
+{ "contentType": "text/calendar", "data": "BEGIN:VCALENDAR\nVERSION:2.0\n..." }
+```
+
+Retrieving a record with `contentType` and `data` properties will likewise return a response with the specified `Content-Type` and body. If the `Content-Type` is not of the `text` family, the data will be treated as binary data (a Node.js `Buffer`).
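+
+For instance, a sketch of the round trip for the `text/calendar` record stored above (the response framing here is illustrative):
+
+```http
+GET /my-resource/33
+
+HTTP/1.1 200 OK
+Content-Type: text/calendar
+
+BEGIN:VCALENDAR
+VERSION:2.0
+...
+```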
+
+You can also use `application/octet-stream` to indicate that the request body should be preserved in binary form. This is also useful for uploading to a specific property:
+
+```http
+PUT /my-resource/33/image
+Content-Type: image/gif
+
+...image data...
+```
diff --git a/site/versioned_docs/version-4.5/developers/security/basic-auth.md b/site/versioned_docs/version-4.5/developers/security/basic-auth.md
new file mode 100644
index 00000000..6736f2c8
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/basic-auth.md
@@ -0,0 +1,62 @@
+---
+title: Basic Authentication
+---
+
+# Basic Authentication
+
+Harper uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request.
+
+**You do not need to log in separately. Basic Auth is added to each HTTP request like `create_database`, `create_table`, `insert`, etc. via headers.**
+
+A header is added to each HTTP request. The header key is **"Authorization"**, and the header value is **"Basic <<your username and password buffer token>>"**.
+
+## Authentication in Harper Studio
+
+In the code sample below, you can see where we add the authorization header to the request. This needs to be added for each and every HTTP request to Harper.
+
+_Note: This function uses btoa. Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._
+
+```javascript
+const http = require('http');
+
+function callHarperDB(call_object, operation, callback) {
+  const options = {
+    "method": "POST",
+    "hostname": call_object.endpoint_url,
+    "port": call_object.endpoint_port,
+    "path": "/",
+    "headers": {
+      "content-type": "application/json",
+      // Base64-encode the username:password pair for Basic Auth
+      "authorization": "Basic " + btoa(call_object.username + ':' + call_object.password),
+      "cache-control": "no-cache"
+    }
+  };
+
+  const http_req = http.request(options, function (hdb_res) {
+    let chunks = [];
+
+    hdb_res.on("data", function (chunk) {
+      chunks.push(chunk);
+    });
+
+    hdb_res.on("end", function () {
+      const body = Buffer.concat(chunks);
+      // isJson is a helper (not shown) that checks whether the body parses as JSON
+      if (isJson(body)) {
+        return callback(null, JSON.parse(body));
+      } else {
+        return callback(body, null);
+      }
+    });
+  });
+
+  http_req.on("error", function (chunk) {
+    return callback("Failed to connect", null);
+  });
+
+  http_req.write(JSON.stringify(operation));
+  http_req.end();
+}
+```
diff --git a/site/versioned_docs/version-4.5/developers/security/certificate-management.md b/site/versioned_docs/version-4.5/developers/security/certificate-management.md
new file mode 100644
index 00000000..fdc8cc22
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/certificate-management.md
@@ -0,0 +1,74 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+This document covers managing certificates for Harper's external-facing APIs. For information on certificate management for clustering, see [clustering certificate management](../clustering/certificate-management).
+
+## Development
+
+An out-of-the-box install of Harper does not have HTTPS enabled (see [configuration](../../deployments/configuration#http) for the relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS.
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper.
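+
+For example, in `harperdb-config.yaml` (the port value here is illustrative; choose any available port):
+
+```yaml
+http:
+  securePort: 9926
+```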
+
+By default, Harper will generate certificates and place them at `/keys/`. These certificates will not have a valid Common Name (CN) for your Harper node, so you will be able to use HTTPS, but your HTTPS client must be configured to accept the invalid certificate.
+
+## Production
+
+For production deployments, in addition to using HTTPS, we recommend using your own certificate authority (CA) or a public CA such as Let's Encrypt to generate certificates with CNs that match the Fully Qualified Domain Name (FQDN) of your Harper node.
+
+We have a few recommended options for enabling HTTPS in a production setting.
+
+### Option: Enable Harper HTTPS and Replace Certificates
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper.
+
+To replace the certificates, either replace the contents of the existing certificate files at `/keys/`, or update the Harper configuration with the paths of your new certificate files, and then restart Harper.
+
+```yaml
+tls:
+  certificate: ~/hdb/keys/certificate.pem
+  privateKey: ~/hdb/keys/privateKey.pem
+```
+
+The `operationsApi.tls` configuration is optional. If it is not set, Harper will default to the values in the `tls` section.
+
+```yaml
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+```
+
+### mTLS
+
+Mutual TLS (mTLS) is a security protocol that requires both the client and the server to present certificates to each other. Requiring a client certificate can be useful for authenticating clients and ensuring that only authorized clients can access your Harper instance. This can be enabled by setting the `http.mtls` configuration in `harperdb-config.yaml` to `true` and providing a certificate authority in the TLS section:
+
+```yaml
+http:
+  mtls: true
+  ...
+tls:
+  certificateAuthority: ~/hdb/keys/ca.pem
+  ...
+```
+
+### Option: Nginx Reverse Proxy
+
+Instead of enabling HTTPS for Harper, Nginx can be used as a reverse proxy for Harper.
+
+Install Nginx, configure Nginx to use certificates issued from your own CA or a public CA, then configure Nginx to listen for HTTPS requests and forward them to Harper as HTTP requests.
+
+[Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let's Encrypt certificates used by Nginx.
+
+### Option: External Reverse Proxy
+
+Instead of enabling HTTPS for Harper, a number of different external services can be used as a reverse proxy for Harper. These services typically have integrated certificate management. Configure the service to listen for HTTPS requests and forward them (over a private network) to Harper as HTTP requests.
+
+Examples of these types of services include an AWS Application Load Balancer or a GCP external HTTP(S) load balancer.
+
+### Additional Considerations
+
+It is possible to use different certificates for the Operations API and the Custom Functions API. In scenarios where only your Custom Functions endpoints need to be exposed to the Internet and the Operations API is reserved for Harper administration, you may want to use a private CA to issue certificates for the Operations API and a public CA for the Custom Functions API certificates.
diff --git a/site/versioned_docs/version-4.5/developers/security/configuration.md b/site/versioned_docs/version-4.5/developers/security/configuration.md
new file mode 100644
index 00000000..f21eb9b2
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/configuration.md
@@ -0,0 +1,39 @@
+---
+title: Configuration
+---
+
+# Configuration
+
+Harper was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with Harper.
+
+## CORS
+
+Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, Harper enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following:
+
+1. Open the `harperdb-config.yaml` file, which can be found in the root path you specified during install.
+1. In `harperdb-config.yaml` there should be 2 entries under `operationsApi.network`: `cors` and `corsAccessList`.
+   * `cors`
+     1. To turn off, change to: `cors: false`
+     1. To turn on, change to: `cors: true`
+   * `corsAccessList`
+     1. The `corsAccessList` will only be recognized by the system when `cors` is `true`
+     1. To create an access list, set `corsAccessList` to a comma-separated list of domains.
+
+        i.e. `corsAccessList` is `http://harpersystems.dev,http://products.harpersystems.dev`
+     1. To clear out the access list and allow all domains: `corsAccessList` is `[null]`
+
+## SSL
+
+Harper provides the option to use an HTTP or HTTPS (with HTTP/2) interface. The default port for the server is 9925.
+
+This default port can be changed by updating the `operationsApi.network.port` value in `/harperdb-config.yaml`.
+
+By default, HTTPS is turned off and HTTP is turned on. It is recommended that you never directly expose Harper's HTTP interface through a publicly available port. HTTP is intended for local or private network use.
+
+You can toggle between HTTPS and HTTP in the settings file by setting `operationsApi.network.https` to `true`/`false`. When `https` is set to `false`, the server will use HTTP (version 1.1). Enabling HTTPS will enable both HTTPS/1.1 and HTTPS/2.
+
+Harper automatically generates a certificate (`certificate.pem`), a certificate authority (`ca.pem`), and a private key file (`privateKey.pem`), which live at `/keys/`.
+
+You can replace these with your own certificates and key.
+
+**Changes to these settings require a restart, which can be performed with the `restart` operation from the Harper Operations API.**
diff --git a/site/versioned_docs/version-4.5/developers/security/index.md b/site/versioned_docs/version-4.5/developers/security/index.md
new file mode 100644
index 00000000..55897945
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/index.md
@@ -0,0 +1,13 @@
+---
+title: Security
+---
+
+# Security
+
+Harper uses role-based, attribute-level security to ensure that users can only gain access to the data they're supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases.
+
+* [JWT Authentication](./jwt-auth)
+* [Basic Authentication](./basic-auth)
+* [mTLS Authentication](./mtls-auth)
+* [Configuration](./configuration)
+* [Users and Roles](./users-and-roles)
diff --git a/site/versioned_docs/version-4.5/developers/security/jwt-auth.md b/site/versioned_docs/version-4.5/developers/security/jwt-auth.md
new file mode 100644
index 00000000..a62d2841
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/jwt-auth.md
@@ -0,0 +1,96 @@
+---
+title: JWT Authentication
+---
+
+# JWT Authentication
+
+Harper uses token-based authentication with JSON Web Tokens (JWTs).
+
+This consists of two primary operations: `create_authentication_tokens` and `refresh_operation_token`. These generate two types of tokens, as follows:
+
+* The `operation_token`, which is used to authenticate all Harper operations in the Bearer Token Authorization header. The default expiry is one day.
+* The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization header for the `refresh_operation_token` operation only. The default expiry is thirty days.
+
+The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost.
+
+## Create Authentication Tokens
+
+Users must initially create tokens using their Harper credentials. The following POST body is sent to Harper. No headers are required for this POST operation.
+
+```json
+{
+  "operation": "create_authentication_tokens",
+  "username": "username",
+  "password": "password"
+}
+```
+
+A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+  "operation": "create_authentication_tokens",
+  "username": "username",
+  "password": "password"
+}'
+```
+
+An example expected return object is:
+
+```json
+{
+  "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4",
+  "refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60"
+}
+```
+
+## Using JWT Authentication Tokens
+
+The `operation_token` value is used to authenticate all operations in place of our standard Basic auth. In order to pass the token, you will need to create a Bearer Token Authorization header like the following request:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \
+--data-raw '{
+  "operation":"search_by_hash",
+  "schema":"dev",
+  "table":"dog",
+  "hash_values":[1],
+  "get_attributes": ["*"]
+}'
+```
+
+## Token Expiration
+
+The `operation_token` expires at a set interval. Once it expires, it will no longer be accepted by Harper. This duration defaults to one day, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization header.
A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \
+--data-raw '{
+  "operation":"refresh_operation_token"
+}'
+```
+
+This will return a new `operation_token`. An example expected return object is:
+
+```json
+{
+  "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ5NzgxODkxNTEsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDk3ODE4OTE1MSwiYWN0aXZlIjp0cnVlLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDk0NDE1MTM0NywiX191cGRhdGVkdGltZV9fIjoxNjA0OTQ0MTUxMzQ3LCJpZCI6IjdiNDNlNzM1LTkzYzctNDQzYi05NGY3LWQwMzY3Njg5NDc4YSIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6InVzZXJuYW1lIn0sImlhdCI6MTYwNDk3ODcxMywiZXhwIjoxNjA1MDY1MTEzLCJzdWIiOiJvcGVyYXRpb24ifQ.qB4FS7fzryCO5epQlFCQe4mQcUEhzXjfsXRFPgauXrGZwSeSr2o2a1tE1xjiI3qjK0r3f2bdi2xpFlDR1thdY-m0mOpHTICNOae4KdKzp7cyzRaOFurQnVYmkWjuV_Ww4PJgr6P3XDgXs5_B2d7ZVBR-BaAimYhVRIIShfpWk-4iN1XDk96TwloCkYx01BuN87o-VOvAnOG-K_EISA9RuEBpSkfUEuvHx8IU4VgfywdbhNMh6WXM0VP7ZzSpshgsS07MGjysGtZHNTVExEvFh14lyfjfqKjDoIJbo2msQwD2FvrTTb0iaQry1-Wwz9QJjVAUtid7tJuP8aBeNqvKyMIXRVnl5viFUr-Gs-Zl_WtyVvKlYWw0_rUn3ucmurK8tTy6iHyJ6XdUf4pYQebpEkIvi2rd__e_Z60V84MPvIYs6F_8CAy78aaYmUg5pihUEehIvGRj1RUZgdfaXElw90-m-M5hMOTI04LrzzVnBu7DcMYg4UC1W-WDrrj4zUq7y8_LczDA-yBC2-bkvWwLVtHLgV5yIEuIx2zAN74RQ4eCy1ffWDrVxYJBau4yiIyCc68dsatwHHH6bMK0uI9ib6Y9lsxCYjh-7MFcbP-4UBhgoDDXN9xoUToDLRqR9FTHqAHrGHp7BCdF5d6TQTVL5fmmg61MrLucOo-LZBXs1NY"
+}
+```
+
+The `refresh_token` also expires at a set interval, but a longer one. Once it expires, it will no longer be accepted by Harper. This duration defaults to thirty days, and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called.
+
+## Configuration
+
+Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration) with the following parameters:
+
+* `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the `operation_token` expires (default 1d).
+* `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the `refresh_token` expires (default 30d).
+
+A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
diff --git a/site/versioned_docs/version-4.5/developers/security/mtls-auth.md b/site/versioned_docs/version-4.5/developers/security/mtls-auth.md
new file mode 100644
index 00000000..0d4538aa
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/mtls-auth.md
@@ -0,0 +1,7 @@
+---
+title: mTLS Authentication
+---
+
+# mTLS Authentication
+
+Harper supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration#http), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection will be authenticated as the user whose id/username is specified by the `CN` (common name) of the client certificate's `subject`, by default. The [HTTP config settings](../../deployments/configuration#http) also allow you to determine whether mTLS is required for all connections or optional.
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/developers/security/users-and-roles.md b/site/versioned_docs/version-4.5/developers/security/users-and-roles.md
new file mode 100644
index 00000000..b1b5ffc3
--- /dev/null
+++ b/site/versioned_docs/version-4.5/developers/security/users-and-roles.md
@@ -0,0 +1,267 @@
+---
+title: Users & Roles
+---
+
+# Users & Roles
+
+Harper utilizes a Role-Based Access Control (RBAC) framework to manage access to Harper instances. A user is assigned a role that determines the user's permissions to access database resources and run core operations.
+
+## Roles in Harper
+
+Role permissions in Harper are broken into two categories: permissions around database manipulation and permissions around database definition.
+
+**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a Harper instance.
+
+1. At the table level, permissions must be explicitly defined when adding or altering a role – _i.e. Harper will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations._
+1. At the attribute level, permissions for attributes in all tables included in the permissions set will be assigned based either on the specific attribute-level permissions defined in the table's permission set or, if no attribute-level permissions are defined, on the table's CRUD set.
**Database Definition**: Permissions related to managing databases, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role.

**Built-In Roles**

There are three built-in roles within Harper. See a full breakdown of the operations restricted to super\_user roles [here](./users-and-roles#role-based-operation-restrictions).

* `super_user` - This role provides full access to all operations and methods within a Harper instance; it can be considered the admin role.
  * This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
* `cluster_user` - This role is an internal system role type.
  * It is managed internally to allow clustered instances to communicate with one another.
* `structure_user` - This role provides specific access for the creation and deletion of databases and tables.
  * When defining this role type you can either assign a value of true, which allows the role to create and drop databases & tables, or assign a string array of database names, which allows the role to create and drop tables only in the designated databases.

**User-Defined Roles**

In addition to built-in roles, admins (i.e. users assigned to the super\_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.

* Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
* Describe operations will return metadata for all databases, tables, and attributes that a user-defined role has CRUD permissions for.

**Role Permissions**

When creating a new, user-defined role in a Harper instance, you must provide a role name and the permissions to assign to that role. _Reminder, only super users can create and manage roles._

* `role` – name used to easily identify the role assigned to individual users.

  _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation._
* `permissions` – used to explicitly define CRUD access to existing table data.

Example JSON for an `add_role` request:

```json
{
  "operation":"add_role",
  "role":"software_developer",
  "permission":{
    "super_user":false,
    "database_name":{
      "tables": {
        "table_name1": {
          "read":true,
          "insert":true,
          "update":true,
          "delete":false,
          "attribute_permissions":[
            {
              "attribute_name":"attribute1",
              "read":true,
              "insert":true,
              "update":true
            }
          ]
        },
        "table_name2": {
          "read":true,
          "insert":true,
          "update":true,
          "delete":false,
          "attribute_permissions":[]
        }
      }
    }
  }
}
```

**Setting Role Permissions**

There are two parts to a permissions set:

* `super_user` – boolean value indicating if the role should be provided super\_user access.

  _If `super_user` is set to true, there should be no additional database-specific permissions values included, since the role will have access to the entire database schema.
If permissions are included in the body of the operation, they will be stored within Harper, but ignored, as super\_users have full access to the database._
* `permissions`: Database tables to which a role should have specific CRUD access must be included in the final, database-specific `permissions` JSON.

  _For user-defined roles (i.e. non-super\_user roles), blank permissions will result in the user being restricted from accessing any of the database schema._

**Table Permissions JSON**

Each table that a role should be given some level of CRUD permissions to must be included in the `tables` array for its database in the role's permissions JSON passed to the API (_see example above_).

```json
{
  "table_name": {                    // the name of the table to define CRUD perms for
    "read": boolean,                 // access to read from this table
    "insert": boolean,               // access to insert data to table
    "update": boolean,               // access to update data in table
    "delete": boolean,               // access to delete row data in table
    "attribute_permissions": [       // permissions for specific table attributes
      {
        "attribute_name": "attribute_name",  // attribute to assign permissions to
        "read": boolean,             // access to read this attribute from table
        "insert": boolean,           // access to insert this attribute into the table
        "update": boolean            // access to update this attribute in the table
      }
    ]
  }
}
```

**Important Notes About Table Permissions**

1. If a database and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to that database and/or those tables.
1. If a table-level CRUD permission is set to false, any attribute-level permission of the same CRUD type set to true will return an error.

**Important Notes About Attribute Permissions**

1. If there are attribute-specific CRUD permissions that need to be enforced on a table, those need to be explicitly described in the `attribute_permissions` array.
1. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table's `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.

   _See table\_name1's permission set for an example of this – even though the table's hash attribute is not specifically defined in the attribute\_permissions array, because the role has CRUD access to 'attribute1', the role will have the same access to the table's hash attribute._
1. If attribute-level permissions are set – _i.e. attribute\_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).

   _See table\_name1's permission set for an example of this – in this scenario, the role will have the ability to create, insert and update 'attribute1' and the table's hash attribute, but no other attributes on that table._
1. If an `attribute_permissions` array is empty, the role's access to a table's attributes will be based on the table-level CRUD permissions.

   _See table\_name2's permission set for an example of this._
1. The `__createdtime__` and `__updatedtime__` attributes that Harper manages internally can have read permissions set; if set, all other attribute-level permissions on them will be ignored.
1. Please note that DELETE permissions are not included as part of an individual attribute-level permission set. That is because it is not possible to delete individual attributes from a row; rows must be deleted in full.
   * If a role needs the ability to delete rows from a table, that permission should be set at the table level.
   * The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement.

## Role-Based Operation Restrictions

The table below includes all API operations available in Harper and indicates whether or not the operation is restricted to super\_user roles.

_Keep in mind that non-super\_user roles will also be restricted within the operations they do have access to by the database-level CRUD permissions set for the roles._

| Databases and Tables | Restricted to Super\_Users |
|----------------------| :------------------------: |
| describe\_all | |
| describe\_database | |
| describe\_table | |
| create\_database | X |
| drop\_database | X |
| create\_table | X |
| drop\_table | X |
| create\_attribute | |
| drop\_attribute | X |

| NoSQL Operations | Restricted to Super\_Users |
| ---------------------- | :------------------------: |
| insert | |
| update | |
| upsert | |
| delete | |
| search\_by\_hash | |
| search\_by\_value | |
| search\_by\_conditions | |

| SQL Operations | Restricted to Super\_Users |
| -------------- | :------------------------: |
| select | |
| insert | |
| update | |
| delete | |

| Bulk Operations | Restricted to Super\_Users |
| ---------------- | :------------------------: |
| csv\_data\_load | |
| csv\_file\_load | |
| csv\_url\_load | |
| import\_from\_s3 | |

| Users and Roles | Restricted to Super\_Users |
| --------------- | :------------------------: |
| list\_roles | X |
| add\_role | X |
| alter\_role | X |
| drop\_role | X |
| list\_users | X |
| user\_info | |
| add\_user | X |
| alter\_user | X |
| drop\_user | X |

| Clustering | Restricted to Super\_Users |
| ----------------------- | :------------------------: |
| cluster\_set\_routes | X |
| cluster\_get\_routes | X |
| cluster\_delete\_routes | X |
| add\_node | X |
| update\_node | X |
| cluster\_status | X |
| remove\_node | X |
| configure\_cluster | X |

| Components | Restricted to Super\_Users |
| -------------------- | :------------------------: |
| get\_components | X |
| get\_component\_file | X |
| set\_component\_file | X |
| drop\_component | X |
| add\_component | X |
| package\_component | X |
| deploy\_component | X |

| Custom Functions | Restricted to Super\_Users |
| ---------------------------------- | :------------------------: |
| custom\_functions\_status | X |
| get\_custom\_functions | X |
| get\_custom\_function | X |
| set\_custom\_function | X |
| drop\_custom\_function | X |
| add\_custom\_function\_project | X |
| drop\_custom\_function\_project | X |
| package\_custom\_function\_project | X |
| deploy\_custom\_function\_project | X |

| Registration | Restricted to Super\_Users |
| ------------------ | :------------------------: |
| registration\_info | |
| get\_fingerprint | X |
| set\_license | X |

| Jobs | Restricted to Super\_Users |
| ----------------------------- | :------------------------: |
| get\_job | |
| search\_jobs\_by\_start\_date | X |

| Logs | Restricted to Super\_Users |
| --------------------------------- | :------------------------: |
| read\_log | X |
| read\_transaction\_log | X |
| delete\_transaction\_logs\_before | X |
| read\_audit\_log | X |
| delete\_audit\_logs\_before | X |

| Utilities | Restricted to Super\_Users |
| ----------------------- | :------------------------: |
| delete\_records\_before | X |
| export\_local | X |
| export\_to\_s3 | X |
| system\_information | X |
| restart | X |
| restart\_service | X |
| get\_configuration | X |
| configure\_cluster | X |

| Token Authentication | Restricted to Super\_Users |
| ------------------------------ | :------------------------: |
| create\_authentication\_tokens | |
| refresh\_operation\_token | |

## Error: Must execute as User

**You may have gotten an error like** `Error: Must execute as <>`.

This means that you installed Harper as `<>`. Because Harper stores files natively on the operating system, we only allow the Harper executable to be run by a single user. This prevents permissions issues on files.

For example, if you installed as user\_a but later wanted to run as user\_b, user\_b may not have access to the hdb files Harper needs. This also keeps Harper more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.

diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/date-functions.md b/site/versioned_docs/version-4.5/developers/sql-guide/date-functions.md new file mode 100644 index 00000000..4ce2c203 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/date-functions.md @@ -0,0 +1,226 @@

---
title: SQL Date Functions
---

:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
:::

# SQL Date Functions

Harper utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC.

When parsing date values passed to SQL date functions in HDB, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found.

### CURRENT_DATE()

Returns the current date in UTC in `YYYY-MM-DD` String format.

```
"SELECT CURRENT_DATE() AS current_date_result" returns
  {
    "current_date_result": "2020-04-22"
  }
```

### CURRENT_TIME()

Returns the current time in UTC in `HH:mm:ss.SSS` String format.

```
"SELECT CURRENT_TIME() AS current_time_result" returns
  {
    "current_time_result": "15:18:14.639"
  }
```

### CURRENT_TIMESTAMP

Referencing this variable will evaluate as the current Unix Timestamp in milliseconds.

```
"SELECT CURRENT_TIMESTAMP AS current_timestamp_result" returns
  {
    "current_timestamp_result": 1587568845765
  }
```

### DATE([date_string])

Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.

If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above.
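For instance, calling DATE() with no argument returns the current date/time (a sketch; the timestamp shown is illustrative and will differ when you run it):

```
"SELECT DATE() AS date_result" returns
  {
    "date_result": "2020-04-22T15:20:45.765+0000"
  }
```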
```
"SELECT DATE(1587568845765) AS date_result" returns
  {
    "date_result": "2020-04-22T15:20:45.765+0000"
  }
```

```
"SELECT DATE(CURRENT_TIMESTAMP) AS date_result2" returns
  {
    "date_result2": "2020-04-22T15:20:45.765+0000"
  }
```

### DATE_ADD(date, value, interval)

Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument; accepted interval values are:

| Key | Shorthand |
|--------------|-----------|
| years | y |
| quarters | Q |
| months | M |
| weeks | w |
| days | d |
| hours | h |
| minutes | m |
| seconds | s |
| milliseconds | ms |

```
"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND
"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return
  {
    "date_add_result": 1587655245765
  }
```

```
"SELECT DATE_ADD(CURRENT_TIMESTAMP, 2, 'years')
AS date_add_result2" returns
  {
    "date_add_result2": 1650643129017
  }
```

### DATE_DIFF(date_1, date_2[, interval])

Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds.

Accepted interval values:
* years
* months
* weeks
* days
* hours
* minutes
* seconds

```
"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours')
AS date_diff_result" returns
  {
    "date_diff_result": -17519.753333333334
  }
```

### DATE_FORMAT(date, format)

Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/).

```
"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss')
AS date_format_result" returns
  {
    "date_format_result": "2018-04-22 15:57:07"
  }
```

### DATE_SUB(date, value, interval)

Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument; accepted interval values are:

| Key | Shorthand |
|--------------|-----------|
| years | y |
| quarters | Q |
| months | M |
| weeks | w |
| days | d |
| hours | h |
| minutes | m |
| seconds | s |
| milliseconds | ms |

```
"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns
  {
    "date_sub_result": 1524410445765
  }
```

### EXTRACT(date, date_part)

Extracts and returns the date_part requested as a String value. The accepted date_part values below show the value returned for date = “2020-03-26T15:13:02.041+0000”

| date_part | Example return value |
|--------------|----------------------|
| year | “2020” |
| month | “3” |
| day | “26” |
| hour | “15” |
| minute | “13” |
| second | “2” |
| millisecond | “41” |

```
"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns
  {
    "extract_result": "2020"
  }
```

### GETDATE()

Returns the current Unix Timestamp in milliseconds.

```
"SELECT GETDATE() AS getdate_result" returns
  {
    "getdate_result": 1587568845765
  }
```

### GET_SERVER_TIME()
Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+ +``` +"SELECT GET_SERVER_TIME() AS get_server_time_result" returns + { + "get_server_time_result": "2020-04-22T15:20:45.765+0000" + } +``` + +### OFFSET_UTC(date, offset) +Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. + +``` +"SELECT OFFSET_UTC(1587568845765, 240) AS offset_utc_result" returns + { + "offset_utc_result": "2020-04-22T19:20:45.765+0400" + } +``` + +``` +"SELECT OFFSET_UTC(1587568845765, 10) AS offset_utc_result2" returns + { + "offset_utc_result2": "2020-04-23T01:20:45.765+1000" + } +``` + +### NOW() +Returns the current Unix Timestamp in milliseconds. + +``` +"SELECT NOW() AS now_result" returns + { + "now_result": 1587568845765 + } +``` + diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/features-matrix.md b/site/versioned_docs/version-4.5/developers/sql-guide/features-matrix.md new file mode 100644 index 00000000..f4225cf9 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/features-matrix.md @@ -0,0 +1,88 @@ +--- +title: SQL Features Matrix +--- + +# SQL Features Matrix + +:::warning +Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +## SQL Features Matrix + +Harper provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need. + +| INSERT | | +| ---------------------------------- | - | +| Values - multiple values supported | ✔ | +| Sub-SELECT | ✗ | + +| UPDATE | | +| ---------------- | - | +| SET | ✔ | +| Sub-SELECT | ✗ | +| Conditions | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | + +| DELETE | | +| ---------- | - | +| FROM | ✔ | +| Sub-SELECT | ✗ | +| Conditions | ✔ | + +| SELECT | | +| -------------------- | - | +| Column SELECT | ✔ | +| Aliases | ✔ | +| Aggregator Functions | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | +| Constant Values | ✔ | +| Distinct | ✔ | +| Sub-SELECT | ✗ | + +| FROM | | +| ---------------- | - | +| Multi-table JOIN | ✔ | +| INNER JOIN | ✔ | +| LEFT OUTER JOIN | ✔ | +| LEFT INNER JOIN | ✔ | +| RIGHT OUTER JOIN | ✔ | +| RIGHT INNER JOIN | ✔ | +| FULL JOIN | ✔ | +| UNION | ✗ | +| Sub-SELECT | ✗ | +| TOP | ✔ | + +| WHERE | | +| -------------------------- | - | +| Multi-Conditions | ✔ | +| Wildcards | ✔ | +| IN | ✔ | +| LIKE | ✔ | +| Bit-wise Operators AND, OR | ✔ | +| Bit-wise Operators NOT | ✔ | +| NULL | ✔ | +| BETWEEN | ✔ | +| EXISTS,ANY,ALL | ✔ | +| Compare columns | ✔ | +| Compare constants | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | +| Sub-SELECT | ✗ | + +| GROUP BY | | +| --------------------- | - | +| Multi-Column GROUP BY | ✔ | + +| HAVING | | +| ----------------------------- | - | +| Aggregate function conditions | ✔ | + +| ORDER BY | | +| --------------------- | - | +| Multi-Column ORDER BY | ✔ | +| Aliases | ✔ | +| Date Functions\* | ✔ | +| Math Functions | ✔ | diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/functions.md b/site/versioned_docs/version-4.5/developers/sql-guide/functions.md new file mode 100644 index 00000000..eeebd8b4 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/functions.md @@ -0,0 +1,157 @@ +--- +title: 
Harper SQL Functions
---

:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
:::

# Harper SQL Functions

This SQL keywords reference contains the SQL functions available in Harper.

## Functions
### Aggregate

| Keyword | Syntax | Description |
|-----------------|----------------------------------------------------------------------|-------------|
| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
| COUNT | SELECT COUNT(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. |
| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string with concatenated, comma-separated, non-null values from a group. Returns null when there are no non-null values. |
| MAX | SELECT MAX(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the largest value in a specified column. |
| MIN | SELECT MIN(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the smallest value in a specified column. |
| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
| ARRAY* | ARRAY(_expression_) | Returns a list of data as a field. |
| DISTINCT_ARRAY* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) results set. |

*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).

### Conversion

| Keyword | Syntax | Description |
|---------|--------------------------------------------------|-------------|
| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |

### Date & Time

| Keyword | Syntax | Description |
|-------------------|-------------------------------------------|-------------|
| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” String format. |
| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. For more information, go [here](./date-functions). |
| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ String format. If a date_string is not provided, the function will return the current UTC date/time value. For more information, go [here](./date-functions). |
| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, go [here](./date-functions). |
| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds. For more information, go [here](./date-functions). |
| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the moment.js docs. For more information, go [here](./date-functions). |
| DATE_SUB | DATE_SUB(_date, value, interval_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. For more information, go [here](./date-functions). |
| DAY | DAY(_date_) | Returns the day of the month for the given date. |
| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the date given (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the date_part requested as a String value. Accepted date_part values and example return values are listed on the date functions page. For more information, go [here](./date-functions). |
| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the offset provided included in the return String value formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. |
| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in range of 0 to 838. |
| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in range of 0 to 59. |
| MONTH | MONTH(_date_) | Returns the month part for a specified date in range of 1 to 12. |
| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in range of 0 to 59. |
| YEAR | YEAR(_date_) | Returns the year part for a specified date. |

### Logical

| Keyword | Syntax | Description |
|---------|--------------------------------------------------|-------------|
| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2; if not equal, returns expression_1. |

### Mathematical

| Keyword | Syntax | Description |
|---------|---------------------------------|-------------|
| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
| CEIL | CEIL(_number_) | Returns the integer ceiling, the smallest integer value that is greater than or equal to a given number. |
| EXP | EXP(_number_) | Returns e to the power of a specified number. |
| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. |
| ROUND | ROUND(_number, decimal_places_) | Rounds a given number to a specified number of decimal places. |
| SQRT | SQRT(_expression_) | Returns the square root of an expression. |

### String

| Keyword | Syntax | Description |
|-------------|---------------------------------------------------------------------------------------|-------------|
| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
| LEN | LEN(_string_) | Returns the length of a string. |
| LOWER | LOWER(_string_) | Converts a string to lower-case. |
| REGEXP | SELECT _column_name_ FROM _database.table_ WHERE _column_name_ REGEXP _pattern_ | Searches a column for matching strings against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
| REGEXP_LIKE | SELECT _column_name_ FROM _database.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches a column for matching strings against a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string with new_string. |
| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified number of characters from a string. |
| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
| UPPER | UPPER(_string_) | Converts a string to upper-case. |

## Operators
### Logical Operators

| Keyword | Syntax | Description |
|----------|-----------------------------------------------------------------------------------------------------|-------------|
| BETWEEN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | Returns values (numbers, text, or dates) within a given range (inclusive). |
| IN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
| LIKE | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_n_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |
## Queries
### General

| Keyword | Syntax | Description |
|-----------|-----------------------------------------------------------------------------------------------------------------------------------------|-------------|
| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _database.table_ | Returns only unique values, eliminating duplicate records. |
| FROM | FROM _database.table_ | Used to list the database(s), table(s), and any joins required for a SQL statement. |
| GROUP BY | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
| HAVING | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
| SELECT | SELECT _column_name(s)_ FROM _database.table_ | Selects data from a table. |
| WHERE | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ | Extracts records based on a defined condition. |

### Joins

| Keyword | Syntax | Description |
|---------------------|---------------------------------------------------------------------------------------------------------------------------------------|-------------|
| CROSS JOIN | SELECT _column_name(s)_ FROM _database.table_1_ CROSS JOIN _database.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
| FULL OUTER | SELECT _column_name(s)_ FROM _database.table_1_ FULL OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
| [INNER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ INNER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ LEFT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ RIGHT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |

### Predicates

| Keyword | Syntax | Description |
|--------------|---------------------------------------------------------------------------------|----------------------------|
| IS NOT NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
| IS NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NULL | Tests for null values. |

### Statements

| Keyword | Syntax | Description |
|---------|------------------------------------------------------------------------------------------------|-------------------------------------|
| DELETE | DELETE FROM _database.table_ WHERE _condition_ | Deletes existing data from a table. |
| INSERT | INSERT INTO _database.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. |
| UPDATE | UPDATE _database.table_ SET _column_1 = value_1, column_2 = value_2, ....,_ WHERE _condition_ | Alters existing records in a table. |

diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/index.md b/site/versioned_docs/version-4.5/developers/sql-guide/index.md new file mode 100644 index 00000000..941be5d0 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/index.md @@ -0,0 +1,88 @@

---
title: SQL Guide
---

# SQL Guide

:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
:::

## Harper SQL Guide

The purpose of this guide is to describe the available functionality of Harper as it relates to supported SQL functionality. The SQL parser is still actively being developed; many SQL features may not be optimized or utilize indexes. This document will be updated as more features and functionality become available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying and for utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](./features-matrix)**.**

Harper adheres to the concept of databases & tables. This allows developers to isolate table structures from each other all within one database.

## Select

Harper has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering.

All results are returned as JSON object arrays.

Query for all records and attributes in the dev.dog table:

```
SELECT * FROM dev.dog
```

Query specific columns from all rows in the dev.dog table:

```
SELECT id, dog_name, age FROM dev.dog
```

Query for all records and attributes in the dev.dog table ORDERED BY age in ASC order:

```
SELECT * FROM dev.dog ORDER BY age
```

_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._

## Insert

Harper supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. Harper does not support selecting from one table to insert into another at this time.

```
INSERT INTO dev.dog (id, dog_name, age, breed_id)
  VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
```

## Update

Harper supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time selecting from one table to update another is not supported.

```
UPDATE dev.dog
  SET owner_name = 'Kyle'
  WHERE id IN (1, 2)
```

## Delete

Harper supports deleting records from a table with condition support.
```
DELETE FROM dev.dog
  WHERE age < 4
```

## Joins

Harper allows developers to join any number of tables and currently supports the following join types:

* INNER JOIN
* LEFT INNER JOIN
* LEFT OUTER JOIN

Here's a basic example joining two tables from our Get Started example, joining a dogs table with a breeds table:

```
SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
  FROM dev.dog AS d
  INNER JOIN dev.breed AS b ON d.breed_id = b.id
  WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
  AND b.section = 'Mutt'
  ORDER BY d.dog_name
```

diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/json-search.md b/site/versioned_docs/version-4.5/developers/sql-guide/json-search.md new file mode 100644 index 00000000..0727b07f --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/json-search.md @@ -0,0 +1,177 @@

---
title: SQL JSON Search
---

:::warning
Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
:::

# SQL JSON Search

Harper automatically indexes all top-level attributes in a row/object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, Harper offers a special SQL function called SEARCH\_JSON. The SEARCH\_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](https://docs.jsonata.org/overview.html) into our SQL engine.

## Syntax

SEARCH\_JSON(_expression, attribute_)

Executes the supplied string _expression_ against the data of the defined top-level _attribute_ for each row. The expression both filters and defines output from the JSON document.

### Example 1

#### Search a string array

Here are two records in the database:

```json
[
  {
    "id": 1,
    "name": ["Harper", "Penny"]
  },
  {
    "id": 2,
    "name": ["Penny"]
  }
]
```

Here is a simple query that gets any record with "Harper" found in the name:

```
SELECT *
FROM dev.dog
WHERE search_json('"Harper" in *', name)
```

### Example 2

The purpose of this query is to give us every movie where at least two of our favorite actors from Marvel films have acted together. The results will return the movie title, the overview, the release date, and an object array of each actor's name and their character name in the movie.

Both function calls evaluate the credits.cast attribute, which is an object array of every cast member in a movie.

```
SELECT m.title,
    m.overview,
    m.release_date,
    SEARCH_JSON($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]].{"actor": name, "character": character}, c.`cast`) AS characters
FROM movies.credits c
    INNER JOIN movies.movie m
    ON c.movie_id = m.id
WHERE SEARCH_JSON($count($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2
```
Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2 +``` + +A sample of this data from the movie The Avengers looks like + +```json +[ + { + "cast_id": 46, + "character": "Tony Stark / Iron Man", + "credit_id": "52fe4495c3a368484e02b251", + "gender": "male", + "id": 3223, + "name": "Robert Downey Jr.", + "order": 0 + }, + { + "cast_id": 2, + "character": "Steve Rogers / Captain America", + "credit_id": "52fe4495c3a368484e02b19b", + "gender": "male", + "id": 16828, + "name": "Chris Evans", + "order": 1 + }, + { + "cast_id": 307, + "character": "Bruce Banner / The Hulk", + "credit_id": "5e85e8083344c60015411cfa", + "gender": "male", + "id": 103, + "name": "Mark Ruffalo", + "order": 2 + } +] +``` + +Let’s break down the SEARCH\_JSON function call in the SELECT: + +``` +SEARCH_JSON( + $[name in [ + "Robert Downey Jr.", + "Chris Evans", + "Scarlett Johansson", + "Mark Ruffalo", + "Chris Hemsworth", + "Jeremy Renner", + "Clark Gregg", + "Samuel L. Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]].{ + "actor": name, + "character": character + }, + c.`cast` +) +``` + +The first argument passed to SEARCH\_JSON is the expression to execute against the second argument which is the cast attribute on the credits table. This expression will execute for every row. Looking into the expression it starts with “$\[…]” this tells the expression to iterate all elements of the cast array. + +Then the expression tells the function to only return entries where the name attribute matches any of the actors defined in the array: + +``` +name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"] +``` + +So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{“actor”: name, “character”: character}`. This tells the function to create a specific object for each matching entry. + +**Sample Result** + +```json +[ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } +] +``` + +Just having the SEARCH\_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors, in order to filter out the movies we do not want we also use SEARCH\_JSON in the WHERE clause. + +This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT: + +``` +SEARCH_JSON( + $count( + $[name in [ + "Robert Downey Jr.", + "Chris Evans", + "Scarlett Johansson", + "Mark Ruffalo", + "Chris Hemsworth", + "Jeremy Renner", + "Clark Gregg", + "Samuel L. Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]] + ), + c.`cast` +) >= 2 +``` + +As seen above we execute the same name filter against the cast array, the primary difference is we are wrapping the filtered results in $count(…). As it looks this returns a count of the results back which we then use against our SQL comparator of >= 2. + +To see further SEARCH\_JSON examples in action view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples). 
+ +To learn more about how to build expressions check out the JSONata documentation: [http:/docs.jsonata.org/overview](http:/docs.jsonata.org/overview) diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/reserved-word.md b/site/versioned_docs/version-4.5/developers/sql-guide/reserved-word.md new file mode 100644 index 00000000..8ce9f025 --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/reserved-word.md @@ -0,0 +1,207 @@ +--- +title: Harper SQL Reserved Words +--- + +:::warning +Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# Harper SQL Reserved Words + +This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a database, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable. + +For Example, for a table called `ASSERT` in the `data` database, a SQL select on that table would look like: + +``` +SELECT * from data.`ASSERT` +``` + +Alternatively: + +``` +SELECT * from data.[ASSERT] +``` + +### RESERVED WORD LIST + +* ABSOLUTE +* ACTION +* ADD +* AGGR +* ALL +* ALTER +* AND +* ANTI +* ANY +* APPLY +* ARRAY +* AS +* ASSERT +* ASC +* ATTACH +* AUTOINCREMENT +* AUTO_INCREMENT +* AVG +* BEGIN +* BETWEEN +* BREAK +* BY +* CALL +* CASE +* CAST +* CHECK +* CLASS +* CLOSE +* COLLATE +* COLUMN +* COLUMNS +* COMMIT +* CONSTRAINT +* CONTENT +* CONTINUE +* CONVERT +* CORRESPONDING +* COUNT +* CREATE +* CROSS +* CUBE +* CURRENT_TIMESTAMP +* CURSOR +* DATABASE +* DECLARE +* DEFAULT +* DELETE +* DELETED +* DESC +* DETACH +* DISTINCT +* DOUBLEPRECISION +* DROP +* ECHO +* EDGE +* END +* ENUM +* ELSE +* EXCEPT +* EXISTS +* EXPLAIN +* FALSE +* FETCH +* FIRST +* FOREIGN +* FROM +* GO +* GRAPH +* GROUP +* GROUPING +* HAVING +* HDB_HASH +* HELP +* IF +* IDENTITY +* IS +* IN +* INDEX +* INNER +* INSERT +* INSERTED +* INTERSECT +* INTO +* JOIN +* KEY +* LAST +* LET +* LEFT +* LIKE +* LIMIT +* LOOP +* MATCHED +* MATRIX +* MAX +* MERGE +* MIN +* MINUS +* MODIFY +* NATURAL +* NEXT +* NEW +* NOCASE +* NO +* NOT +* NULL +* OFF +* ON +* ONLY +* OFFSET +* OPEN +* OPTION +* OR +* ORDER +* OUTER +* OVER +* PATH +* PARTITION +* PERCENT +* PLAN +* PRIMARY +* PRINT +* PRIOR +* QUERY +* READ +* RECORDSET +* REDUCE +* REFERENCES +* RELATIVE +* REPLACE +* REMOVE +* RENAME +* REQUIRE +* RESTORE +* RETURN +* RETURNS +* RIGHT +* ROLLBACK +* ROLLUP +* ROW +* SCHEMA +* SCHEMAS +* SEARCH +* SELECT +* SEMI +* SET +* SETS +* SHOW +* SOME +* SOURCE +* STRATEGY +* STORE +* SYSTEM +* SUM +* TABLE +* TABLES +* TARGET +* TEMP +* TEMPORARY +* TEXTSTRING +* THEN +* TIMEOUT +* TO +* TOP +* TRAN +* TRANSACTION +* TRIGGER +* TRUE +* TRUNCATE +* UNION +* UNIQUE +* UPDATE +* USE +* USING +* VALUE +* VERTEX +* VIEW +* WHEN +* WHERE +* WHILE +* WITH +* WORK diff --git a/site/versioned_docs/version-4.5/developers/sql-guide/sql-geospatial-functions.md b/site/versioned_docs/version-4.5/developers/sql-guide/sql-geospatial-functions.md new file mode 100644 index 00000000..17ea789a --- /dev/null +++ b/site/versioned_docs/version-4.5/developers/sql-guide/sql-geospatial-functions.md @@ -0,0 +1,384 @@ +--- +title: SQL Geospatial Functions +--- + +:::warning +Harper encourages developers to utilize other querying tools over SQL for 
performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
:::

# SQL Geospatial Functions

Harper geospatial features require data to be stored in a single column using the [GeoJSON standard](https://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements.

If you are new to GeoJSON you should check out the full specification here: https://geojson.org/. There are a few important things to point out before getting started.

1) All GeoJSON coordinates are stored in `[longitude, latitude]` format.
2) Coordinates or GeoJSON geometries must be passed as strings when written directly in a SQL statement.
3) Note if you are using Postman for your testing: due to limitations in the Postman client, you will need to escape quotes in your strings, and your SQL will need to be passed on a single line.

In the examples that follow, database and table names may change, but all GeoJSON data will be stored in a column named geo_data.

# geoArea

The geoArea() function returns the area of one or more features in square meters.

## Syntax
geoArea(_geoJSON_)

## Parameters
| Parameter | Description |
|-----------|---------------------------------|
| geoJSON | Required. One or more features. |

### Example 1
Calculate the area, in square meters, of a manually passed GeoJSON polygon.

```
SELECT geoArea('{
  "type":"Feature",
  "geometry":{
    "type":"Polygon",
    "coordinates":[[
      [0,0],
      [0.123456,0],
      [0.123456,0.123456],
      [0,0.123456]
    ]]
  }
}')
```

### Example 2
Find all records that have an area less than 1 square mile (or 2589988 square meters).

```
SELECT * FROM dev.locations
WHERE geoArea(geo_data) < 2589988
```

# geoLength
Takes a GeoJSON and measures its length in the specified units (default is kilometers).

## Syntax
geoLength(_geoJSON_[_, units_])

## Parameters
| Parameter | Description |
|------------|-----------------------------------------------------------------------------------------------------------------------|
| geoJSON | Required. GeoJSON to measure. |
| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |

### Example 1
Calculate the length, in kilometers, of a manually passed GeoJSON linestring.

```
SELECT geoLength('{
  "type": "Feature",
  "geometry": {
    "type": "LineString",
    "coordinates": [
      [-104.97963309288025,39.76163265441438],
      [-104.9823260307312,39.76365323407955],
      [-104.99193906784058,39.75616442110704]
    ]
  }
}')
```

### Example 2
Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of length, smallest to largest.

```
SELECT *, geoLength(geo_data, 'miles') as length
FROM dev.locations
WHERE geoLength(geo_data, 'miles') < 5
ORDER BY length ASC
```

# geoDifference
Returns a new polygon with the difference of the second polygon clipped from the first polygon.

## Syntax
geoDifference(_polygon1, polygon2_)

## Parameters
| Parameter | Description |
|------------|----------------------------------------------------|
| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. |
| polygon2 | Required.
Polygon or MultiPolygon GeoJSON feature to remove from polygon1. | + +### Example +Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_). + +``` +SELECT geoDifference('{ + "type": "Feature", + "properties": { + "name":"Colorado" + }, + "geometry": { + "type": "Polygon", + "coordinates": [[ + [-109.072265625,37.00255267215955], + [-102.01904296874999,37.00255267215955], + [-102.01904296874999,41.0130657870063], + [-109.072265625,41.0130657870063], + [-109.072265625,37.00255267215955] + ]] + } + }', + '{ + "type": "Feature", + "properties": { + "name":"City Park" + }, + "geometry": { + "type": "Polygon", + "coordinates": [[ + [-104.95973110198975,39.7543828214657], + [-104.95955944061278,39.744781185675386], + [-104.95904445648193,39.74422022399989], + [-104.95835781097412,39.74402223643582], + [-104.94097709655762,39.74392324244047], + [-104.9408483505249,39.75434982844515], + [-104.95973110198975,39.7543828214657] + ]] + } + }' +) +``` + +# geoDistance +Calculates the distance between two points in units (default is kilometers). + +## Syntax +geoDistance(_point1, point2_[_, units_]) + +## Parameters +| Parameter | Description | +|------------|-----------------------------------------------------------------------------------------------------------------------| +| point1 | Required. GeoJSON Point specifying the origin. | +| point2 | Required. GeoJSON Point specifying the destination. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | + +### Example 1 +Calculate the distance, in miles, between Harper’s headquarters and the Washington Monument. + +``` +SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles') +``` + +### Example 2 +Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in an ascending order. + +``` +SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance +FROM dev.locations +WHERE geoDistance('[-104.979127,39.761563]', geo_data, 'kilometers') < 40 +ORDER BY distance ASC +``` + +# geoNear +Determines if point1 and point2 are within a specified distance from each other, default units are kilometers. Returns a Boolean. + +## Syntax +geoNear(_point1, point2, distance_[_, units_]) + +## Parameters +| Parameter | Description | +|------------|-----------------------------------------------------------------------------------------------------------------------| +| point1 | Required. GeoJSON Point specifying the origin. | +| point2 | Required. GeoJSON Point specifying the destination. | +| distance | Required. The maximum distance in units as an integer or decimal. | +| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. | + +### Example 1 +Return all locations within 50 miles of a given point. + +``` +SELECT * +FROM dev.locations +WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles') +``` + +### Example 2 +Return all locations within 2 degrees of the earth of a given point. (Each degree lat/long is about 69 miles [111 kilometers]). Return all data and the distance in miles, sorted by ascending distance. + +``` +SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance +FROM dev.locations +WHERE geoNear('[-104.979127,39.761563]', geo_data, 2, 'degrees') +ORDER BY distance ASC +``` + +# geoContains +Determines if geo2 is completely contained by geo1. 
Returns a Boolean. + +## Syntax +geoContains(_geo1, geo2_) + +## Parameters +| Parameter | Description | +|------------|-----------------------------------------------------------------------------------| +| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. | +| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. | + +### Example 1 +Return all locations within the state of Colorado (passed as a GeoJSON string). + +``` +SELECT * +FROM dev.locations +WHERE geoContains('{ + "type": "Feature", + "properties": { + "name":"Colorado" + }, + "geometry": { + "type": "Polygon", + "coordinates": [[ + [-109.072265625,37.00255267], + [-102.01904296874999,37.00255267], + [-102.01904296874999,41.01306579], + [-109.072265625,41.01306579], + [-109.072265625,37.00255267] + ]] + } +}', geo_data) +``` + +### Example 2 +Return all locations which contain Harper Headquarters. + +``` +SELECT * +FROM dev.locations +WHERE geoContains(geo_data, '{ + "type": "Feature", + "properties": { + "name": "Harper Headquarters" + }, + "geometry": { + "type": "Polygon", + "coordinates": [[ + [-104.98060941696167,39.760704817357905], + [-104.98053967952728,39.76065120861263], + [-104.98055577278137,39.760642961109674], + [-104.98037070035934,39.76049450588716], + [-104.9802714586258,39.76056254790385], + [-104.9805235862732,39.76076461167841], + [-104.98060941696167,39.760704817357905] + ]] + } +}') +``` + +# geoEqual +Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https:/developers.arcgis.com/documentation/spatial-references/. Returns a Boolean. + +## Syntax +geoEqual(_geo1_, _geo2_) + +## Parameters +| Parameter | Description | +|------------|----------------------------------------| +| geo1 | Required. GeoJSON geometry or feature. | +| geo2 | Required. GeoJSON geometry or feature. | + +### Example +Find Harper Headquarters within all locations within the database. + +``` +SELECT * +FROM dev.locations +WHERE geoEqual(geo_data, '{ + "type": "Feature", + "properties": { + "name": "Harper Headquarters" + }, + "geometry": { + "type": "Polygon", + "coordinates": [[ + [-104.98060941696167,39.760704817357905], + [-104.98053967952728,39.76065120861263], + [-104.98055577278137,39.760642961109674], + [-104.98037070035934,39.76049450588716], + [-104.9802714586258,39.76056254790385], + [-104.9805235862732,39.76076461167841], + [-104.98060941696167,39.760704817357905] + ]] + } +}') +``` + +# geoCrosses +Determines if the geometries cross over each other. Returns boolean. + +## Syntax +geoCrosses(_geo1, geo2_) + +## Parameters +| Parameter | Description | +|------------|-----------------------------------------| +| geo1 | Required. GeoJSON geometry or feature. | +| geo2 | Required. GeoJSON geometry or feature. | + +### Example +Find all locations that cross over a highway. + +``` +SELECT * +FROM dev.locations +WHERE geoCrosses( + geo_data, + '{ + "type": "Feature", + "properties": { + "name": "Highway I-25" + }, + "geometry": { + "type": "LineString", + "coordinates": [ + [-104.9139404296875,41.00477542222947], + [-105.0238037109375,39.715638134796336], + [-104.853515625,39.53370327008705], + [-104.853515625,38.81403111409755], + [-104.61181640625,38.39764411353178], + [-104.8974609375,37.68382032669382], + [-104.501953125,37.00255267215955] + ] + } + }' +) +``` + +# geoConvert + +Converts a series of coordinates into a GeoJSON of the specified type. 
+ +## Syntax +geoConvert(_coordinates, geo_type_[, _properties_]) + +## Parameters +| Parameter | Description | +|--------------|------------------------------------------------------------------------------------------------------------------------------------| +| coordinates | Required. One or more coordinates | +| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’ | +| properties | Optional. Escaped JSON array with properties to be added to the GeoJSON output. | + +### Example +Convert a given coordinate into a GeoJSON point with specified properties. + +``` +SELECT geoConvert( + '[-104.979127,39.761563]', + 'point', + '{ + "name": "Harper Headquarters" + }' +) +``` diff --git a/site/versioned_docs/version-4.5/getting-started/first-harper-app.md b/site/versioned_docs/version-4.5/getting-started/first-harper-app.md new file mode 100644 index 00000000..1244113f --- /dev/null +++ b/site/versioned_docs/version-4.5/getting-started/first-harper-app.md @@ -0,0 +1,162 @@ +--- +title: Create Your First Application +--- + +# Create Your First Application +Now that you've set up Harper, let's build a simple API. Harper lets you build powerful APIs with minimal effort. In just a few minutes, you'll have a functional REST API with automatic validation, indexing, and querying—all without writing a single line of code. + +## Setup Your Project +Start by cloning the Harper application template: + +```bash +git clone https:/github.com/HarperDB/application-template my-app +cd my-app +``` + +## Creating our first Table +The core of a Harper application is the database, so let's create a database table. + +A quick and expressive way to define a table is through a [GraphQL Schema](https:/graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we will need to add a `type` of `@table` named `Dog` (and you can remove the example table in the template): + +```graphql +type Dog @table { + # properties will go here soon +} +``` + +And then we'll add a primary key named `id` of type `ID`: + +_(Note: A GraphQL schema is a fast method to define tables in Harper, but you are by no means required to use GraphQL to query your application, nor should you necessarily do so)_ + +```graphql +type Dog @table { + id: ID @primaryKey +} +``` + +Now we tell Harper to run this as an application: + +```bash +harperdb dev . # tell Harper cli to run current directory as an application in dev mode +``` +Harper will now create the `Dog` table and its `id` attribute we just defined. Not only is this an easy way to create a table, but this schema is included in our application, which will ensure that this table exists wherever we deploy this application (to any Harper instance). + +## Adding Attributes to our Table +Next, let's expand our `Dog` table by adding additional typed attributes for dog `name`, `breed` and `age`. +```graphql +type Dog @table { + id: ID @primaryKey + name: String + breed: String + age: Int +} +``` + +This will ensure that new records must have these properties with these types. + +Because we ran `harperdb dev .` earlier (dev mode), Harper is now monitoring the contents of our application directory for changes and reloading when they occur. 
This means that once we save our schema file with these new attributes, Harper will automatically reload our application, read `my-app/schema.graphql` and update the `Dog` table and attributes we just defined. The dev mode will also ensure that any logging or errors are immediately displayed in the console (rather only in the log file). + +As a document database, Harper supports heterogeneous records, so you can freely specify additional properties on any record. If you do want to restrict the records to only defined properties, you can always do that by adding the sealed directive: + +```graphql +type Dog @table @sealed { + id: ID @primaryKey + name: String + breed: String + age: Int + tricks: [String] +} +``` + +## Adding an Endpoint +Now that we have a running application with a database (with data if you imported any data), let's make this data accessible from a RESTful URL by adding an endpoint. To do this, we simply add the `@export` directive to our `Dog` table: + +```graphql +type Dog @table @export { + id: ID @primaryKey + name: String + breed: String + age: Int + tricks: [String] +} +``` + +By default the application HTTP server port is `9926` (this can be [configured here](../deployments/configuration#http)), so the local URL would be http:/localhost:9926/Dog/ with a full REST API. We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, we could use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id: + +```json +POST /Dog/ +Content-Type: application/json + +{ + "name": "Harper", + "breed": "Labrador", + "age": 3, + "tricks": ["sits"] +} +``` + +With this a record will be created and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/` to view that record. Alternately, the curl command curl `http:/localhost:9926/Dog/` will achieve the same thing. + +## Authenticating Endpoints +Now that you've created your first API endpoints, it's important to ensure they're protected. Without authentication, anyone could potentially access, misuse, or overload your APIs, whether by accident or malicious intent. Authentication verifies who is making the request and enables you to control access based on identity, roles, or permissions. It’s a foundational step in building secure, reliable applications. + +Endpoints created with Harper automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../developers/security/) for more information on different levels of access. + +By default, Harper also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make it simple to interact for local development. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../deployments/configuration#authentication). + +### Content Negotiation +These endpoints support various content types, including `JSON`, `CBOR`, `MessagePack` and `CSV`. Simply include an `Accept` header in your requests with the preferred content type. We recommend `CBOR` as a compact, efficient encoding with rich data types, but `JSON` is familiar and great for web application development, and `CSV` can be useful for exporting data to spreadsheets or other processing. 
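+
+For example, a client could request CBOR-encoded results with a standard `fetch` call (a minimal sketch; it assumes the `/Dog/` endpoint created above and the default port `9926`):
+
+```javascript
+// Request the Dog records as CBOR instead of the default JSON
+const response = await fetch('http://localhost:9926/Dog/', {
+	headers: { Accept: 'application/cbor' },
+});
+const cborBytes = new Uint8Array(await response.arrayBuffer()); // pass these bytes to a CBOR decoder
+```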
+ +Harper works with other important standard HTTP headers as well, and these endpoints are even capable of caching interaction: + +``` +Authorization: Basic +Accept: application/cbor +If-None-Match: "etag-id" # browsers can automatically provide this +``` + +## Querying + +Querying your application database is straightforward and easy, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../developers/rest). Simple queries can be crafted through [URL query parameters](https:/en.wikipedia.org/wiki/Query_string). + +In order to maintain reasonable query speed on a database as it grows in size, it is critical to select and establish the proper indexes. So, before we add the `@export` declaration to our `Dog` table and begin querying it, let's take a moment to target some table properties for indexing. We'll use `name` and `breed` as indexed table properties on our `Dog` table. All we need to do to accomplish this is tag these properties with the `@indexed` directive: + +```graphql +type Dog @table { + id: ID @primaryKey + name: String @indexed + breed: String @indexed + owner: String + age: Int + tricks: [String] +} +``` + +And finally, we'll add the `@export` directive to expose the table as a RESTful endpoint + +```graphql +type Dog @table @export { + id: ID @primaryKey + name: String @indexed + breed: String @indexed + owner: String + age: Int + tricks: [String] +} +``` + +Now we can start querying. Again, we just simply access the endpoint with query parameters (basic GET requests), like: + +``` +http:/localhost:9926/Dog/?name=Harper +http:/localhost:9926/Dog/?breed=Labrador +http:/localhost:9926/Dog/?breed=Husky&name=Balto&select(id,name,breed) +``` + +Congratulations, you now have created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../developers/rest) and see the [Schema reference](../developers/applications/defining-schemas) for more options for defining schemas. + +> Additionally, you may now use GraphQL (over HTTP) to create queries. See the documentation for that new feature [here](../../technical-details/reference/graphql). + + +## Key Takeaway +Harper's schema-driven approach means you can build production-ready APIs in minutes, not hours. Start with pure schema definitions to get 90% of your functionality, then add custom code only where needed. This gives you the best of both worlds: rapid development with the flexibility to customize when required. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/getting-started/harper-concepts.md b/site/versioned_docs/version-4.5/getting-started/harper-concepts.md new file mode 100644 index 00000000..492a81ba --- /dev/null +++ b/site/versioned_docs/version-4.5/getting-started/harper-concepts.md @@ -0,0 +1,26 @@ +--- +title: Harper Concepts +--- + +# Harper Concepts + +As you begin your journey with Harper, there are a few concepts and definitions that you should understand. + +## Components +Harper components are a core Harper concept defined as flexible JavaScript based extensions of the highly extensible core Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](https:/docs.harperdb.io/technical-details/reference/globals) (such as Resource, databases, and tables). 
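+
+As a quick illustration, a component's JavaScript code can use those globals directly (a minimal sketch; the `Dog` table and its `name` attribute are assumed to be defined in the component's schema):
+
+```javascript
+// In a component module: globals like `tables` and `logger` need no imports
+const { Dog } = tables;
+
+async function findDog(id) {
+	const dog = await Dog.get(id); // read a record through the Resource API
+	logger.info(`Found dog: ${dog?.name}`);
+	return dog;
+}
+```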
+ +A key aspect to components are their extensibility; components can be built on other components. For example, a [Harper Application](https:/docs.harperdb.io/developers/applications) is a component that uses many other components. The [application template](https:/github.com/HarperDB/application-template) demonstrates many of Harper's built-in components such as [rest](https:/docs.harperdb.io/developers/components/built-in#rest) (for automatic REST endpoint generation), [graphqlSchema](https:/docs.harperdb.io/developers/components/built-in#graphqlschema) (for table schema definitions), and many more. + +## Applications +Applications are a subset of components that cannot be used directly and must depend on other extensions. Examples include defining schemas (using [graphqlSchema](https:/docs.harperdb.io/developers/components/built-in#graphqlschema) built-in extension), defining custom resources (using [jsResource](https:/docs.harperdb.io/developers/components/built-in#jsresource) built-in extension), hosting static files (using [static](https:/docs.harperdb.io/developers/components/built-in#static) built-in extension), enabling REST querying of resources (using [rest](https:/docs.harperdb.io/developers/components/built-in#rest) built-in extension), and running [Next.js](https:/github.com/HarperDB/nextjs), [Astro](https:/github.com/HarperDB/astro), or [Apollo](https:/github.com/HarperDB/apollo) applications through their respective extensions. + +## Resources +Resources in Harper encompass databases, tables, and schemas that store and structure data within the system. The concept is central to Harper's data management capabilities, with custom resources being enabled by the built-in jsResource extension. Resources represent the data layer of the Harper ecosystem and provide the foundation for data operations across applications built with the platform. + +## Server +Harper is a multi-protocol server, handling incoming requests from clients and serving data from the data model. Harper supports multiple server protocols, with components for serving REST/HTTP (including Server-Sent Events), MQTT, WebSockets, and the Operations API (and custom server components can be added). Harper uses separate layers for the data model and the servers. The data model, which is defined with resources, can be exported and be used as the source for any of the servers. A single table or other resource can then be accessed and modified through REST, MQTT, SSE, or any other server protocol, for a powerful integrated model with multiple forms of access. +Networking in Harper handles different communication protocols including HTTP, WebSocket, and MQTT, as well as event-driven systems. These networking capabilities enable Harper applications to communicate with other services, receive requests, send responses, and participate in real-time data exchange. The networking layer is fundamental to Harper's functionality as a versatile application platform. + +__ + +As you go through Harper, you will pick up more knowledge of other advanced areas along the way, but with these concepts, you're now ready to create your first application. 
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/getting-started/index.md b/site/versioned_docs/version-4.5/getting-started/index.md new file mode 100644 index 00000000..041d3b21 --- /dev/null +++ b/site/versioned_docs/version-4.5/getting-started/index.md @@ -0,0 +1,45 @@ +--- +title: Getting Started +--- + +# Getting Started + +If you're new to Harper, this section will guide you through the essential resources you need to get started. + +Follow the steps in this documentation to discover how Harper can simplify your backend stack, eliminate many inter-process communication delays, and achieve a more predictable and performant application experience. + +For more advanced concepts in Harper, see our [blog](https:/www.harpersystems.dev/blog). + +## Harper Basics +
+- **Install Harper**: Pick the installation method that best suits your environment
+- **What is Harper**: Learn about Harper, how it works, and some of its use cases
+- **Harper Concepts**: Learn about Harper's fundamental concepts and how they interact
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/getting-started/install-harper.md b/site/versioned_docs/version-4.5/getting-started/install-harper.md new file mode 100644 index 00000000..ca9caa75 --- /dev/null +++ b/site/versioned_docs/version-4.5/getting-started/install-harper.md @@ -0,0 +1,127 @@ +--- +title: Install Harper +--- + +# Install Harper + +There are three ways to install a Harper instance: using a package manager like npm, deploying it as a Docker container, and offline installation. Below is a step-by-step tutorial for each method. + +## Installing via NPM +Before you begin, ensure you have [Node.js](https:/nodejs.org/) LTS version or newer. Node.js comes with npm, which will be used to install Harper. + +Open your terminal or command prompt and install Harper globally by executing the command below. Installing globally allows the `harperdb` command to be accessible from anywhere on your machine, making it easier to manage multiple projects. + +```bash +npm install -g harperdb +``` + +Once the installation finishes, simply start your Harper instance by running the command below in your terminal. + +```bash +harperdb +``` + +This launches Harper as a standalone, where you can define your schemas, endpoints, and application logic within a single integrated environment. The first time you set this up, you will need to set up your Harper destination, username, password, config, and hostname. + +At this point, your local Harper instance is up and running, giving you the ability to develop and test your database applications using your favorite local development tools, including debuggers and version control systems. + +## Installing via Docker +Using Docker to run Harper is an efficient way to manage a containerized instance that encapsulates all of Harper’s functionality. First, ensure that Docker is installed and running on your system. If it isn’t, download it from the [official Docker website](https:/docs.docker.com/engine/install/) and complete the installation process. + +Next, open your terminal and pull the latest Harper image by running the following command: + +```bash +docker pull harperdb/harperdb +``` + +This command downloads the official Harper image from Docker Hub, ensuring you have the most recent version of the containerized instance. Once the image is downloaded, you can start a new Harper container with the following command: + +```bash +docker run -d -p 9925:9925 harperdb/harperdb +``` + +In this command, the `-d` flag runs the container in detached mode, allowing it to operate in the background, and the `-p 9925:9925` flag maps port 9925 on your local machine to port 9925 within the container, which is Harper’s default port. This port mapping lets you interact with the Harper instance directly from your local environment. + +### How to Use this Image +[Harper configuration settings⁠](https:/harperdb.io/docs/reference/configuration-file/) can be passed as Docker run environment variables. If no environment variables are provided, Harper will operate with default configuration settings, such as: +- ROOTPATH=/home/harperdb/hdb +- OPERATIONSAPI_NETWORK_PORT=9925 +- HDB_ADMIN_USERNAME=HDB_ADMIN +- HDB_ADMIN_PASSWORD=password +- LOGGING_STDSTREAMS=true + +These defaults allow you to quickly start an instance, though you can customize your configuration to better suit your needs. + +Containers created from this image store all data and Harper configuration at `/home/harperdb/hdb`. 
To ensure that your data persists beyond the lifecycle of a container, you should mount this directory to a directory on the container host using a Docker volume. This ensures that your database remains available and your settings are not lost when the container is stopped or removed.
+
+:::info
+Test that your Harper instance is up and running by querying `curl http://localhost:9925/health`
+:::
+
+### Example Deployments
+To run a Harper container in the background with persistent storage and exposed ports, you can use a command like this (the `<host_directory_path>` placeholder is a stand-in for a directory on your system):
+
+```bash
+docker run -d \
+  -v <host_directory_path>:/home/harperdb/hdb \
+  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
+  -e HDB_ADMIN_PASSWORD=password \
+  -e THREADS=4 \
+  -p 9925:9925 \
+  -p 9926:9926 \
+  harperdb/harperdb
+```
+
+Here, `<host_directory_path>` should be replaced with an actual directory path on your system where you want to store the persistent data. This command also exposes both the Harper Operations API (port 9925) and an additional HTTP port (9926).
+For a more advanced setup, enabling HTTPS and clustering, you can run:
+
+```bash
+docker run -d \
+  -v <host_directory_path>:/home/harperdb/hdb \
+  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
+  -e HDB_ADMIN_PASSWORD=password \
+  -e THREADS=4 \
+  -e OPERATIONSAPI_NETWORK_PORT=null \
+  -e OPERATIONSAPI_NETWORK_SECUREPORT=9925 \
+  -e HTTP_SECUREPORT=9926 \
+  -e CLUSTERING_ENABLED=true \
+  -e CLUSTERING_USER=cluster_user \
+  -e CLUSTERING_PASSWORD=password \
+  -e CLUSTERING_NODENAME=hdb1 \
+  -p 9925:9925 \
+  -p 9926:9926 \
+  -p 9932:9932 \
+  harperdb/harperdb
+```
+
+In this setup, additional environment variables disable the insecure Operations API port and enable secure ports for HTTPS, along with clustering parameters such as the clustering user, password, and node name. Port 9932 is also exposed for Harper clustering communication.
+
+Finally, if you simply wish to check the Harper version using the container, execute:
+
+```bash
+docker run --rm harperdb/harperdb /bin/bash -c "harperdb version"
+```
+
+This command runs the container momentarily to print the version information, then removes the container automatically when finished.
+
+### Logs and Troubleshooting
+To verify that the container is running properly, you can check your running containers with:
+
+```bash
+docker ps
+```
+
+If you want to inspect the logs to ensure that Harper has started correctly, use this command (be sure to replace `<container_id>` with the actual ID from the previous command):
+
+```bash
+docker logs <container_id>
+```
+
+Once verified, you can access your Harper instance by opening your web browser and navigating to http://localhost:9925 (or the appropriate port based on your configuration).
+
+### Raw binary installation
+There's a different way to install Harper: you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and npm). Click [this link](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) to download the package.
Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it:
+
+```bash
+npm install -g harperdb-X.X.X.tgz
+harperdb install
+```
diff --git a/site/versioned_docs/version-4.5/getting-started/what-is-harper.md new file mode 100644 index 00000000..4b9113ad --- /dev/null +++ b/site/versioned_docs/version-4.5/getting-started/what-is-harper.md @@ -0,0 +1,60 @@
+---
+title: What is Harper
+---
+
+# What is Harper
+
+:::info
+[Connect with our team!](https://www.harpersystems.dev/contact)
+:::
+
+## What is Harper? Performance, Simplicity, and Scale.
+
+Harper is an all-in-one backend technology that fuses database technologies, caching, application hosting, and messaging functions into a single system. Unlike traditional architectures, where each piece runs independently and incurs extra costs and latency from serialization and network operations between processes, Harper systems can handle workloads seamlessly and efficiently.
+
+Harper simplifies scaling with clustering and native data replication. At scale, architectures tend to include 4 to 16 redundant, geo-distributed nodes located near every user population center. This ensures that every user experiences minimal network latency and maximum reliability in addition to the already rapid server responses.
+
+![](/img/v4.5/harperstack.jpg)
+
+## Understanding the Paradigm Shift
+
+Have you ever combined MongoDB with Redis, Next.js with Postgres, or perhaps Fastify with anything else? The options seem endless. It turns out that the cost of serialization, network hops, and intermediary processes in these systems adds up to 50% of the total system resources used (often more), not to mention the hundreds of milliseconds of latency they can add.
+
+What we realized is that networking systems together in this way is inefficient and only necessary because a fused technology did not exist. So, we built Harper, a database fused with a complete JavaScript application system. It’s not only orders of magnitude more performant than separated systems, but it’s also easier to deploy and manage at scale.
+
+## Build With Harper
+
+Start by running Harper locally with [npm](https://www.npmjs.com/package/harperdb) or [Docker](https://hub.docker.com/r/harperdb/harperdb).
+
+Since technology tends to be built around the storage, processing, and transfer of data, start by [defining your schema](./first-harper-app#creating-our-first-table) with the `schema.graphql` file in the root of the application directory.
+
+If you would like to [query](./first-harper-app#adding-an-endpoint) this data, add the `@export` directive to your data schema and test out the [REST](../developers/rest), [MQTT](../developers/real-time#mqtt), or [WebSocket](../developers/real-time#websockets) endpoints.
+
+When you are ready for something a little more advanced, start [customizing your application](../developers/applications/#custom-functionality-with-javascript).
+
+Finally, when it’s time to deploy, explore [replication](../developers/replication/) between nodes.
+
+If you would like to jump into the most advanced capabilities, learn about [components](../developers/components/).
+
+:::warning
+Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
+:::
+
+## Popular Use Cases
+
+With so much functionality built in, the use cases span nearly all application systems.
Some of the most popular are listed below, motivated by new levels of performance and system simplicity. + +### Online Catalogs & Content Delivery + +For use cases like e-commerce, real estate listing, and content-oriented sites, Harper’s breakthroughs in performance and distribution pay dividends in the form of better SEO and higher conversion rates. One common implementation leverages Harper’s [Next.js Component](https:/github.com/HarperDB/nextjs) to host modern, performant frontend applications. Other implementations leverage the built-in caching layer and JavaScript application system to [server-side render pages](https:/www.harpersystems.dev/development/tutorials/server-side-rendering-with-multi-tier-cache) that remain fully responsive because of built-in WebSocket connections. + +### Data Delivery Networks + +For use cases like real-time sports updates, flight tracking, and zero-day software update distribution, Harper is rapidly gaining popularity. Harper’s ability to receive and broadcast messages while simultaneously handling application logic and data storage streamlines operations and eliminates the need for multiple separate systems. To build an understanding of our messaging system function, refer to our [real-time documentation](../developers/real-time). + +### Edge Inference Systems + +Capturing, storing, and processing real-time data streams from client and IoT systems typically requires a stack of technology. Harper’s selective data replication and self-healing connections make for an ideal multi-tier system where edge and cloud systems both run Harper, making everything more performant. + +[We’re happy](https:/www.harpersystems.dev/contact) to walk you through how to do this. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/index.md b/site/versioned_docs/version-4.5/index.md new file mode 100644 index 00000000..98e5f5d0 --- /dev/null +++ b/site/versioned_docs/version-4.5/index.md @@ -0,0 +1,104 @@ +--- +title: Harper Docs +--- + +# Harper Docs + +:::info +[Connect with our team!](https:/www.harpersystems.dev/contact) +::: + +Welcome to the Harper Documentation! Here, you'll find all things Harper, and everything you need to get started, troubleshoot issues, and make the most of our platform. + +## Getting Started + +
+- **Install Harper**: Pick the installation method that best suits your environment
+- **What is Harper**: Learn about Harper, how it works, and some of its use cases
+- **Harper Concepts**: Learn about Harper's fundamental concepts and how they interact
+
+## Building with Harper
+
+- **Harper Applications**: Build a fully featured Harper Component with custom functionality
+- **REST Queries**: The recommended HTTP interface for data access, querying, and manipulation
+- **Operations API**: Configure, deploy, administer, and control your Harper instance
+- **Clustering & Replication**: The process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns
+- **Explore the Harper Studio**: The web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface
diff --git a/site/versioned_docs/version-4.5/technical-details/_category_.json b/site/versioned_docs/version-4.5/technical-details/_category_.json new file mode 100644 index 00000000..69ce80a6 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Technical Details", + "position": 4, + "link": { + "type": "generated-index", + "title": "Technical Details Documentation", + "description": "Reference documentation and technical specifications", + "keywords": [ + "technical-details" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/reference/analytics.md b/site/versioned_docs/version-4.5/technical-details/reference/analytics.md new file mode 100644 index 00000000..d3156053 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/analytics.md @@ -0,0 +1,117 @@ +--- +title: Analytics +--- + +# Analytics + +Harper provides extensive telemetry and analytics data to help monitor the status of the server and work loads, and to help understand traffic and usage patterns to identify issues and scaling needs, and identify queries and actions that are consuming the most resources. + +Harper collects statistics for all operations, URL endpoints, and messaging topics, aggregating information by thread, operation, resource, and methods, in real-time. These statistics are logged in the `hdb_raw_analytics` and `hdb_analytics` table in the `system` database. + +There are two "levels" of analytics in the Harper analytics table: the first is the immediate level of raw direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the search\_by\_conditions operation (this will search for 10 seconds worth of analytics) on the `hdb_raw_analytics` table: + +``` +POST http:/localhost:9925 +Content-Type: application/json + +{ + "operation": "search_by_conditions", + "schema": "system", + "table": "hdb_raw_analytics", + "conditions": [{ + "search_attribute": "id", + "search_type": "between", + "search_value": [168859400000, 1688594010000] + }] +} +``` + +And a typical response looks like: + +``` +{ + "time": 1688594390708, + "period": 1000.8336279988289, + "metrics": [ + { + "metric": "bytes-sent", + "path": "search_by_conditions", + "type": "operation", + "median": 202, + "mean": 202, + "p95": 202, + "p90": 202, + "count": 1 + }, + ... + { + "metric": "memory", + "threadId": 2, + "rss": 1492664320, + "heapTotal": 124596224, + "heapUsed": 119563120, + "external": 3469790, + "arrayBuffers": 798721 + }, + { + "metric": "utilization", + "idle": 138227.52767700003, + "active": 70.5066209952347, + "utilization": 0.0005098165086230495 + } + ], + "threadId": 2, + "totalBytesProcessed": 12182820, + "id": 1688594390708.6853 +} +``` + +The second level of analytics recording is aggregate data. The aggregate records are recorded once a minute, and aggregate the results from all the per-second entries from all the threads, creating a summary of statistics once a minute. The ids for these milliseconds since epoch can be queried from the `hdb_analytics` table. 
You can query these with an operation like: + +``` +POST http:/localhost:9925 +Content-Type: application/json + +{ + "operation": "search_by_conditions", + "schema": "system", + "table": "hdb_analytics", + "conditions": [{ + "search_attribute": "id", + "search_type": "between", + "search_value": [1688194100000, 1688594990000] + }] +} +``` + +And a summary record looks like: + +``` +{ + "period": 60000, + "metric": "bytes-sent", + "method": "connack", + "type": "mqtt", + "median": 4, + "mean": 4, + "p95": 4, + "p90": 4, + "count": 1, + "id": 1688589569646, + "time": 1688589569646 +} +``` + +The following are general resource usage statistics that are tracked: + +* memory - This includes RSS, heap, buffer and external data usage. +* utilization - How much of the time the worker was processing requests. +* mqtt-connections - The number of MQTT connections. + +The following types of information is tracked for each HTTP request: + +* success - How many requests returned a successful response (20x response code). TTFB - Time to first byte in the response to the client. +* transfer - Time to finish the transfer of the data to the client. +* bytes-sent - How many bytes of data were sent to the client. + +Requests are categorized by operation name, for the operations API, by the resource (name) with the REST API, and by command for the MQTT interface. diff --git a/site/versioned_docs/version-4.5/technical-details/reference/architecture.md b/site/versioned_docs/version-4.5/technical-details/reference/architecture.md new file mode 100644 index 00000000..dd451ded --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/architecture.md @@ -0,0 +1,42 @@ +--- +title: Architecture +--- + +# Architecture + +Harper's architecture consists of resources, which includes tables and user defined data sources and extensions, and server interfaces, which includes the RESTful HTTP interface, operations API, and MQTT. Servers are supported by routing and auth services. + +``` + ┌──────────┐ ┌──────────┐ + │ Clients │ │ Clients │ + └────┬─────┘ └────┬─────┘ + │ │ + ▼ ▼ + ┌────────────────────────────────────────┐ + │ │ + │ Socket routing/management │ + ├───────────────────────┬────────────────┤ + │ │ │ + │ Server Interfaces ─►│ Authentication │ + │ RESTful HTTP, MQTT │ Authorization │ + │ ◄─┤ │ + │ ▲ └────────────────┤ + │ │ │ │ + ├───┼──────────┼─────────────────────────┤ + │ │ │ ▲ │ + │ ▼ Resources ▲ │ ┌───────────┐ │ + │ │ └─┤ │ │ + ├─────────────────┴────┐ │ App │ │ + │ ├─►│ resources │ │ + │ Database tables │ └───────────┘ │ + │ │ ▲ │ + ├──────────────────────┘ │ │ + │ ▲ ▼ │ │ + │ ┌────────────────┐ │ │ + │ │ External │ │ │ + │ │ data sources ├────┘ │ + │ │ │ │ + │ └────────────────┘ │ + │ │ + └────────────────────────────────────────┘ +``` diff --git a/site/versioned_docs/version-4.5/technical-details/reference/blob.md b/site/versioned_docs/version-4.5/technical-details/reference/blob.md new file mode 100644 index 00000000..12b8b28e --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/blob.md @@ -0,0 +1,100 @@ +--- +title: Blob +--- + +# Blob + +Blobs are binary large objects that can be used to store any type of unstructured/binary data and is designed for large content. Blobs support streaming and feature better performance for content larger than about 20KB. Blobs are built off the native JavaScript `Blob` type, and HarperDB extends the native `Blob` type for integrated storage with the database. 
To use blobs, you would generally want to declare a field as a `Blob` type in your schema:
+```graphql
+type MyTable {
+	id: Any! @primaryKey
+	data: Blob
+}
+```
+
+You can then create a blob, which writes the binary data to disk and can then be included (as a reference) in a record. For example, you can create a record with a blob like:
+
+```javascript
+let blob = await createBlob(largeBuffer);
+await MyTable.put({ id: 'my-record', data: blob });
+```
+The `data` attribute in this example is a blob reference, and can be used like any other attribute in the record, but it is stored separately, and the data must be accessed asynchronously. You can retrieve the blob data with the standard `Blob` methods:
+
+```javascript
+let buffer = await blob.bytes();
+```
+
+If you are creating a resource method, you can return a `Response` object with a blob as the body:
+
+```javascript
+export class MyEndpoint extends MyTable {
+	async get() {
+		return {
+			status: 200,
+			headers: {},
+			body: this.data, // this.data is a blob
+		};
+	}
+}
+```
+One of the important characteristics of blobs is that they natively support asynchronous streaming of data. This is important for both creation and retrieval of large data. When we create a blob with `createBlob`, the returned blob will create the storage entry, but the data will be streamed to storage. This means that you can create a blob from a buffer or from a stream. You can also create a record that references a blob before the blob is fully written to storage. For example, you can create a blob from a stream:
+
+```javascript
+let blob = await createBlob(stream);
+// at this point the blob exists, but the data is still being written to storage
+await MyTable.put({ id: 'my-record', data: blob });
+// we now have written a record that references the blob
+let record = await MyTable.get('my-record');
+// we now have a record that gives us access to the blob. We can asynchronously access the blob's data or stream the data, and it will be available as the stream is written to the blob.
+let stream = record.data.stream();
+```
+This can be powerful functionality for large media content, where content can be streamed into storage while it is streamed out in real-time to users as it is received.
+Alternately, we can also wait for the blob to be fully written to storage before creating a record that references the blob:
+
+```javascript
+let blob = await createBlob(stream);
+// at this point the blob exists, but the data has not yet been fully written to storage
+await blob.save(MyTable);
+// we now know the blob is fully written to storage
+await MyTable.put({ id: 'my-record', data: blob });
+```
+
+Note that this means that blobs are _not_ atomic or [ACID](https://en.wikipedia.org/wiki/ACID) compliant; streaming functionality achieves the opposite behavior of ACID/atomic writes that would prevent access to data as it is being written.
+
+### Error Handling
+Because blobs can be streamed and referenced prior to their completion, there is a chance that an error or interruption could occur while streaming data to the blob (after the record is committed).
We can create an error handler for the blob to handle the case of an interrupted blob:
+
+```javascript
+export class MyEndpoint extends MyTable {
+	async get() {
+		let blob = this.data;
+		blob.on('error', () => {
+			// if this was a caching table, we may want to invalidate or delete this record:
+			this.invalidate();
+		});
+		return {
+			status: 200,
+			headers: {},
+			body: blob,
+		};
+	}
+}
+```
+
+### Blob `size`
+
+Blobs that are created from streams may not have the standard `size` property available, because the size may not be known while data is being streamed. Consequently, the `size` property may be undefined until the size is determined. You can listen for the `size` event to be notified when the size is available:
+```javascript
+let record = await MyTable.get('my-record');
+let blob = record.data;
+blob.size; // will be available if it was saved with a known size
+let stream = blob.stream(); // start streaming the data
+if (blob.size === undefined) {
+	blob.on('size', (size) => {
+		// will be called once the size is available
+	});
+}
+```
+
+See the [configuration](../../deployments/configuration) documentation for more information on configuring where blobs are stored.
diff --git a/site/versioned_docs/version-4.5/technical-details/reference/content-types.md new file mode 100644 index 00000000..735b268d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/content-types.md @@ -0,0 +1,29 @@
+---
+title: Content Types
+---
+
+# Content Types
+
+Harper supports several different content types (or MIME types) for both HTTP request bodies (describing operations) as well as for serializing content into HTTP response bodies. Harper follows HTTP standards for specifying both request body content types and acceptable response body content types. Any of these content types can be used with any of the standard Harper operations.
+
+For request body content, the content type should be specified with the `Content-Type` header. For example, with JSON, use `Content-Type: application/json`, and for CBOR, include `Content-Type: application/cbor`. To request that the response body be encoded with a specific content type, use the `Accept` header. If you want the response to be in JSON, use `Accept: application/json`. If you want the response to be in CBOR, use `Accept: application/cbor`.
+
+The following content types are supported:
+
+## JSON - application/json
+
+JSON is the most widely used content type, and is relatively readable and easy to work with. However, JSON does not support all the data types that are supported by Harper, and can't be used to natively encode data types like binary data or explicit Maps/Sets. Also, JSON is not as efficient as binary formats. When using JSON, compression is recommended (this also follows standard HTTP protocol with the `Accept-Encoding` header) to improve network transfer performance (although there is server performance overhead). JSON is a good choice for web development when standard JSON types are sufficient, when combined with compression, and when debuggability/observability is important.
+
+## CBOR - application/cbor
+
+CBOR is a highly efficient binary format, and is a recommended format for most production use cases with Harper. CBOR supports the full range of Harper data types, including binary data, typed dates, and explicit Maps/Sets. CBOR is very performant and space efficient even without compression.
Compression will still yield better network transfer size/performance, but compressed CBOR is generally not any smaller than compressed JSON. CBOR also natively supports streaming for optimal performance (using indefinite length arrays). The CBOR format has excellent standardization and Harper's CBOR provides an excellent balance of performance and size efficiency. + +## MessagePack - application/x-msgpack + +MessagePack is another efficient binary format like CBOR, with support for all Harper data types. MessagePack generally has wider adoption than CBOR and can be useful in systems that don't have CBOR support (or good support). However, MessagePack does not have native support for streaming of arrays of data (for query results), and so query results are returned as a (concatenated) sequence of MessagePack objects/maps. MessagePack decoders used with Harper's MessagePack must be prepared to decode a direct sequence of MessagePack values to properly read responses. + +## Comma-separated Values (CSV) - text/csv + +Comma-separated values is an easy to use and understand format that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available. + +In addition, with the REST interface, you can use file-style extensions to indicate an encoding like http:/host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest) for more information on how to do this. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/reference/data-types.md b/site/versioned_docs/version-4.5/technical-details/reference/data-types.md new file mode 100644 index 00000000..a066c856 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/data-types.md @@ -0,0 +1,58 @@ +--- +title: Data Types +--- + +# Data Types + +Harper supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (Harper’s supports all of JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. Harper supports MessagePack and CBOR, which allows for all of Harper supported data types. [Schema definitions can specify the expected types for fields, with GraphQL Schema Types](../../developers/applications/defining-schemas), which are used for validation of incoming typed data (JSON, MessagePack), and is used for auto-conversion of untyped data (CSV, [query parameters](../../developers/rest)). Available data types include: + +(Note that these labels are descriptive, they do not necessarily correspond to the GraphQL schema type names, but the schema type names are noted where possible) + +## Boolean + +true or false. The GraphQL schema type name is `Boolean`. + +## String + +Strings, or text, are a sequence of any unicode characters and are internally encoded with UTF-8. The GraphQL schema type name is `String`. + +## Number + +Numbers can be stored as signed integers up to a 1000 bits of precision (about 300 digits) or floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. With JSON, numbers are automatically parsed and stored in the most appropriate format. 
Custom components and applications may use BigInt numbers to store/access integers that are larger than 53-bit. The following GraphQL schema type name are supported: + +* `Float` - Any number that can be represented with [64-bit double precision floating point number](https:/en.wikipedia.org/wiki/Double-precision\_floating-point\_format) ("double") +* `Int` - Any integer between from -2147483648 to 2147483647 +* `Long` - Any integer between from -9007199254740992 to 9007199254740992 +* `BigInt` - Any integer (negative or positive) with less than 300 digits + +Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately. + +## Object/Map + +Objects, or maps, that hold a set named properties can be stored in Harper. When provided as JSON objects or JavaScript objects, all property keys are stored as strings. The order of properties is also preserved in Harper’s storage. Duplicate property keys are not allowed (they are dropped in parsing any incoming data). + +## Array + +Arrays hold an ordered sequence of values and can be stored in Harper. There is no support for sparse arrays, although you can use objects to store data with numbers (converted to strings) as properties. + +## Null + +A null value can be stored in Harper property values as well. + +## Date + +Dates can be stored as a specific data type. This is not supported in JSON, but is supported by MessagePack and CBOR. Custom Functions can also store and use Dates using JavaScript Date instances. The GraphQL schema type name is `Date`. + +## Binary Data + +Binary data can be stored in property values as well, with two different data types that are available: + +### Bytes +JSON doesn’t have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in HarperDB. Custom Functions can also store binary data by using NodeJS’s Buffer or Uint8Array instances to hold the binary data. The GraphQL schema type name is `Bytes`. + +### Blobs +Binary data can also be stored with [`Blob`s](./blob), which can scale much better for larger content than `Bytes`, as it is designed to be streamed and does not need to be held entirely in memory. It is recommended that `Blob`s are used for content larger than 20KB. + +## Explicit Map/Set + +Explicit instances of JavaScript Maps and Sets can be stored and preserved in Harper as well. This can’t be represented with JSON, but can be with CBOR. diff --git a/site/versioned_docs/version-4.5/technical-details/reference/dynamic-schema.md b/site/versioned_docs/version-4.5/technical-details/reference/dynamic-schema.md new file mode 100644 index 00000000..233e1e73 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/dynamic-schema.md @@ -0,0 +1,148 @@ +--- +title: Dynamic Schema +--- + +# Dynamic Schema + +When tables are created without any schema, through the operations API (without specifying attributes) or studio, the tables follow "dynamic-schema" behavior. Generally it is best-practice to define schemas for your tables to ensure predictable, consistent structures with data integrity and precise control over indexing, without dependency on data itself. However, it can often be simpler and quicker to simply create a table and let the data auto-generate the schema dynamically with everything being auto-indexed for broad querying. 
+ +With dynamic schemas individual attributes are reflexively created as data is ingested, meaning the table will adapt to the structure of data ingested. Harper tracks the metadata around schemas, tables, and attributes allowing for describe table, describe schema, and describe all operations. + +### Databases + +Harper databases hold a collection of tables together in a single file that are transactionally connected. This means that operations across tables within a database can be performed in a single atomic transaction. By default tables are added to the default database called "data", but other databases can be created and specified for tables. + +### Tables + +Harper tables group records together with a common data pattern. To create a table users must provide a table name and a primary key. + +* **Table Name**: Used to identify the table. +* **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in Harper operations API. + +## Primary Key + +The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [Harper Storage Algorithm](./storage-algorithm) utilizes this value for indexing. + +**Standard Attributes** + +With tables that are using dynamic schemas, additional attributes are reflexively added via insert and update operations (in both SQL and NoSQL) when new attributes are included in the data structure provided to Harper. As a result, schemas are additive, meaning new attributes are created in the underlying storage algorithm as additional data structures are provided. Harper offers `create_attribute` and `drop_attribute` operations for users who prefer to manually define their data model independent of data ingestion. When new attributes are added to tables with existing data the value of that new attribute will be assumed `null` for all existing records. + +**Audit Attributes** + +Harper automatically creates two audit attributes used on each record if the table is created without a schema. + +* `__createdtime__`: The time the record was created in [Unix Epoch with milliseconds](https:/www.epochconverter.com/) format. +* `__updatedtime__`: The time the record was updated in [Unix Epoch with milliseconds](https:/www.epochconverter.com/) format. + +### Dynamic Schema Example + +To better understand the behavior let’s take a look at an example. This example utilizes [Harper API operations](../../developers/operations-api/databases-and-tables). + +**Create a Database** + +```bash +{ + "operation": "create_database", + "schema": "dev" +} +``` + +**Create a Table** + +Notice the schema name, table name, and primary key name are the only required parameters. + +```bash +{ + "operation": "create_table", + "database": "dev", + "table": "dog", + "primary_key": "id" +} +``` + +At this point the table does not have structure beyond what we provided, so the table looks like this: + +**dev.dog** + +![](/img/v4.5/reference/dynamic\_schema\_2\_create\_table.png.webp) + +**Insert Record** + +To define attributes we do not need to do anything beyond sending them in with an insert operation. 
+ +```bash +{ + "operation": "insert", + "database": "dev", + "table": "dog", + "records": [ + {"id": 1, "dog_name": "Penny", "owner_name": "Kyle"} + ] +} +``` + +With a single record inserted and new attributes defined, our table now looks like this: + +**dev.dog** + +![](/img/v4.5/reference/dynamic\_schema\_3\_insert\_record.png.webp) + +Indexes have been automatically created for `dog_name` and `owner_name` attributes. + +**Insert Additional Record** + +If we continue inserting records with the same data schema no schema updates are required. One record will omit the hash attribute from the insert to demonstrate GUID generation. + +```bash +{ + "operation": "insert", + "database": "dev", + "table": "dog", + "records": [ + {"id": 2, "dog_name": "Monk", "owner_name": "Aron"}, + {"dog_name": "Harper","owner_name": "Stephen"} + ] +} +``` + +In this case, there is no change to the schema. Our table now looks like this: + +**dev.dog** + +![](/img/v4.5/reference/dynamic\_schema\_4\_insert\_additional\_record.png.webp) + +**Update Existing Record** + +In this case, we will update a record with a new attribute not previously defined on the table. + +```bash +{ + "operation": "update", + "database": "dev", + "table": "dog", + "records": [ + {"id": 2, "weight_lbs": 35} + ] +} +``` + +Now we have a new attribute called `weight_lbs`. Our table now looks like this: + +**dev.dog** + +![](/img/v4.5/reference/dynamic\_schema\_5\_update\_existing\_record.png.webp) + +**Query Table with SQL** + +Now if we query for all records where `weight_lbs` is `null` we expect to get back two records. + +```bash +{ + "operation": "sql", + "sql": "SELECT * FROM dev.dog WHERE weight_lbs IS NULL" +} +``` + +This results in the expected two records being returned. + +![](/img/v4.5/reference/dynamic\_schema\_6\_query\_table\_with\_sql.png.webp) diff --git a/site/versioned_docs/version-4.5/technical-details/reference/globals.md b/site/versioned_docs/version-4.5/technical-details/reference/globals.md new file mode 100644 index 00000000..848e2289 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/globals.md @@ -0,0 +1,289 @@ +--- +title: Globals +--- + +# Globals + +The primary way that JavaScript code can interact with Harper is through the global variables, which has several objects and classes that provide access to the tables, server hooks, and resources that Harper provides for building applications. As global variables, these can be directly accessed in any module. + +These global variables are also available through the `harperdb` module/package, which can provide better typing in TypeScript. To use this with your own directory, make sure you link the package to your current `harperdb` installation: + +```bash +npm link harperdb +``` + +The `harperdb` package is automatically linked for all installed components. Once linked, if you are using EcmaScript module syntax you can import function from `harperdb` like: + +```javascript +import { tables, Resource } from 'harperdb'; +``` + +Or if you are using CommonJS format for your modules: + +```javascript +const { tables, Resource } = require('harperdb'); +``` + +The global variables include: + +## `tables` + +This is an object that holds all the tables for the default database (called `data`) as properties. Each of these property values is a table class that subclasses the Resource interface and provides access to the table through the Resource interface. 
For example, you can get a record from a table (in the default database) called 'my-table' with:
+
+```javascript
+import { tables } from 'harperdb';
+const { MyTable } = tables;
+async function getRecord(recordId) {
+	let record = await MyTable.get(recordId);
+}
+```
+
+It is recommended that you [define a database](../../getting-started) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL Schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object.
+
+## `databases`
+
+This is an object that holds all the databases in Harper, and can be used to explicitly access a table by database name. Each database will be a property on this object, and each of these property values will be an object with the set of all tables in that database. The default database, `databases.data`, should equal the `tables` export. For example, if you want to access the "dog" table in the "dev" database, you could do so:
+
+```javascript
+import { databases } from 'harperdb';
+const { Dog } = databases.dev;
+```
+
+## `Resource`
+
+This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](./resource) for more details about implementing a Resource class.
+
+## `auth(username, password?): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. If a password is provided, the password will be verified before returning the user object (if the password is incorrect, an error will be thrown).
+
+## `logger`
+
+This provides methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/standard-logging) for more information.
+
+## `server`
+
+The `server` global object provides a number of functions and objects to interact with Harper's HTTP, networking, and authentication services.
+
+### `server.http(listener: RequestListener, options: HttpOptions): HttpServer[]`
+
+Alias: `server.request`
+
+Add a handler method to the HTTP server request listener middleware chain.
+
+Returns an array of server instances based on the specified `options.port` and `options.securePort`.
+
+Example:
+
+```js
+server.http((request, next) => {
+	return request.url === '/graphql'
+		? handleGraphQLRequest(request)
+		: next(request);
+}, {
+	runFirst: true, // run this handler first
+});
+```
+
+#### `RequestListener`
+
+Type: `(request: Request, next: RequestListener) => Promise`
+
+The HTTP request listener to be added to the middleware chain. To continue chain execution, pass the `request` to the `next` function, such as `return next(request);`.
+
+### `Request` and `Response`
+
+The `Request` and `Response` classes are based on the WHATWG APIs for the [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) classes. Requests and responses are based on these standards-based APIs to facilitate reuse with modern web code.
+While Node.js' HTTP APIs are powerful low-level APIs, the `Request`/`Response` APIs provide excellent composability characteristics, well suited for layered middleware and for clean mapping to [RESTful method handlers](./resource) with promise-based responses, as well as interoperability with other standards-based APIs like [streams](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) used with [`Blob`s](https://developer.mozilla.org/en-US/docs/Web/API/Blob). However, the Harper implementation of these classes is not a direct implementation of the WHATWG APIs, but implements additional/distinct properties for the Harper server environment:
+
+#### `Request`
+
+A `Request` object is passed to the direct static REST handlers, and preserved as the context for instance methods, and has the following properties:
+
+- `url` - This is the request target, which is the portion of the URL that was received by the server. If a client sends a request to `http://example.com:8080/path?query=string`, the actual received request is `GET /path?query=string` and the `url` property will be `/path?query=string`.
+- `method` - This is the HTTP method of the request. This is a string like `GET`, `POST`, `PUT`, `DELETE`, etc.
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the request.
+- `pathname` - This is the path portion of the URL, without the query string. For example, if the URL is `/path?query=string`, the `pathname` will be `/path`.
+- `protocol` - This is the protocol of the request, like `http` or `https`.
+- `data` - This is the deserialized body of the request (based on the type of data specified by the `Content-Type` header).
+- `ip` - This is the remote IP address of the client that made the request (or the remote IP address of the last proxy to connect to Harper).
+- `host` - This is the host of the request, like `example.com`.
+- `sendEarlyHints(link: string, headers?: object): void` - This method sends an early hints response to the client, prior to actually returning a response. This is useful for sending a link header to the client to indicate that another resource should be preloaded. The `headers` argument can be used to send additional headers with the early hints response, in addition to the `link`. This is generally most helpful in a cache resolution function, where you can send hints _if_ the data is not in the cache and is resolving from an origin:
+
+```javascript
+class Origin {
+	async get(request) {
+		// if we are fetching data from origin, send early hints
+		this.getContext().requestContext.sendEarlyHints('');
+		let response = await fetch(request);
+		...
+	}
+}
+Cache.sourcedFrom(Origin);
+```
+
+- `login(username, password): Promise` - This method can be called to start an authenticated session. The login will authenticate the user by username and password. If the authentication was successful, a session will be created and a cookie will be set on the response header that references the session. All subsequent requests from the client that include the cookie will be authenticated as the user that logged in, and the session record will be attached to the request. This method returns a promise that resolves when the login is successful, and rejects if the login is unsuccessful.
+- `session` - This is the session object that is associated with the current cookie-maintained session. This object is used to store session data for the current session.
+This is a `Table` record instance, and can be updated by calling `request.session.update({ key: value })`; the session can be retrieved with `request.session.get()`. If the cookie has not been set yet, a cookie will be set the first time a session is updated or a login occurs.
+- `_nodeRequest` - This is the underlying Node.js [`http.IncomingMessage`](https://nodejs.org/api/http.html#http_class_http_incomingmessage) object. This can be used to access the raw request data, such as the raw headers, raw body, etc. However, this is discouraged and should be used with caution, since it will likely break any other server handlers that depend on the layered `Request` call with `Response` return pattern.
+- `_nodeResponse` - This is the underlying Node.js [`http.ServerResponse`](https://nodejs.org/api/http.html#http_class_http_serverresponse) object. This can be used to access the raw response data, such as the raw headers. Again, this is discouraged and can cause problems for middleware; it should only be used if you are certain that other server handlers will not attempt to return a different `Response` object.
+
+#### `Response`
+
+REST methods can directly return data that is serialized and returned to users, or they can return a `Response` object (or a promise to a `Response`), or a `Response`-like object with the following properties (or again, a promise to it):
+
+- `status` - This is the HTTP status code of the response. This is a number like `200`, `404`, `500`, etc.
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the response.
+- `data` - This is the data to be returned in the response. This will be serialized using Harper's content negotiation.
+- `body` - Alternately (to `data`), the raw body can be returned as a `Buffer`, string, stream (Node.js or [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)), or a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
+
+#### `HttpOptions`
+
+Type: `Object`
+
+Properties:
+
+* `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+* `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+* `securePort` - _optional_ - `number` - Specify which HTTPS server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+#### `HttpServer`
+
+Node.js [`http.Server`](https://nodejs.org/api/http.html#class-httpserver) or [`https.Server`](https://nodejs.org/api/https.html#class-httpsserver) instance.
+
+### `server.socket(listener: ConnectionListener, options: SocketOptions): SocketServer`
+
+Creates a socket server on the specified `options.port` or `options.securePort`.
+
+Only one socket server will be created. A `securePort` takes precedence.
+
+#### `ConnectionListener`
+
+Node.js socket server connection listener as documented in [`net.createServer`](https://nodejs.org/api/net.html#netcreateserveroptions-connectionlistener) or [`tls.createServer`](https://nodejs.org/api/tls.html#tlscreateserveroptions-secureconnectionlistener)
+
+#### `SocketOptions`
+
+* `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance.
+* `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+#### `SocketServer`
+
+Node.js [`net.Server`](https://nodejs.org/api/net.html#class-netserver) or [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+### `server.ws(listener: WsListener, options: WsOptions): HttpServer[]`
+
+Add a listener to the WebSocket connection listener middleware chain. The WebSocket server is associated with the HTTP server specified by the `options.port` or `options.securePort`. Use the [`server.upgrade()`](./globals#serverupgradelistener-upgradelistener-options-upgradeoptions-void) method to add a listener to the upgrade middleware chain.
+
+Example:
+
+```js
+server.ws((ws, request, chainCompletion) => {
+	chainCompletion.then(() => {
+		ws.on('error', console.error);
+
+		ws.on('message', function message(data) {
+			console.log('received: %s', data);
+		});
+
+		ws.send('something');
+	});
+});
+```
+
+#### `WsListener`
+
+Type: `(ws: WebSocket, request: Request, chainCompletion: ChainCompletion, next: WsListener): Promise`
+
+The WebSocket connection listener.
+
+* The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module.
+* The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server.
+* The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket.
+* The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one, such as: `next(ws, request, chainCompletion)`
+
+#### `WsOptions`
+
+Type: `Object`
+
+Properties:
+
+* `maxPayload` - _optional_ - `number` - Set the max payload size for the WebSocket server. Defaults to 100 MB.
+* `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+* `port` - _optional_ - `number` - Specify which WebSocket server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+* `securePort` - _optional_ - `number` - Specify which WebSocket secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.upgrade(listener: UpgradeListener, options: UpgradeOptions): void`
+
+Add a listener to the HTTP Server [upgrade](https://nodejs.org/api/http.html#event-upgrade_1) event. If a WebSocket connection listener is added using [`server.ws()`](./globals#serverwslistener-wslistener-options-wsoptions-httpserver), a default upgrade handler will be added as well. The default upgrade handler will add a `__harperdb_request_upgraded` boolean to the `request` argument to signal the connection has already been upgraded. It will also check for this boolean _before_ upgrading, and if it is `true`, it will pass the arguments along to the `next` listener.
+
+This method should be used to delegate HTTP upgrade events to an external WebSocket server instance.
+
+Example:
+
+> This example is from the Harper Next.js component. See the complete source code [here](https://github.com/HarperDB/nextjs/blob/main/extension.js).
+
+```js
+server.upgrade(
+	(request, socket, head, next) => {
+		if (request.url === '/_next/webpack-hmr') {
+			return upgradeHandler(request, socket, head).then(() => {
+				request.__harperdb_request_upgraded = true;

+				next(request, socket, head);
+			});
+		}

+		return next(request, socket, head);
+	},
+	{ runFirst: true }
+);
+```
+
+#### `UpgradeListener`
+
+Type: `(request, socket, head, next) => void`
+
+The arguments are passed to the middleware chain from the HTTP server [`'upgrade'`](https://nodejs.org/api/http.html#event-upgrade_1) event.
+
+#### `UpgradeOptions`
+
+Type: `Object`
+
+Properties:
+
+* `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+* `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+* `securePort` - _optional_ - `number` - Specify which HTTP secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.config`
+
+This provides access to the Harper configuration object. This comes from the [harperdb-config.yaml](../../deployments/configuration) file (parsed into object form).
+
+### `server.recordAnalytics(value, metric, path?, method?, type?)`
+
+This records the provided value as a metric in Harper's analytics. Harper efficiently records and tracks these metrics and makes them available through the [analytics API](./analytics). The values are aggregated, and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics; make sure you are not grouping at too fine a level for useful aggregation. The parameters are:
+
+* `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example.
+* `metric` - This is the name of the metric.
+* `path` - This is an optional path (like a URL path). For a URL like `/my-resource/`, you would typically include a path of "my-resource", not including the id, so you can group all the requests to "my-resource" instead of individually aggregating by each individual id.
+* `method` - Optional method to group by.
+* `type` - Optional type to group by.
+
+### `server.getUser(username): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. This does not verify the password, so it is generally used for looking up users by username. If you want to verify a user by password, use [`server.authenticateUser`](./globals#serverauthenticateuserusername-password-user).
+
+### `server.authenticateUser(username, password): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. The password will be verified before returning the user object (if the password is incorrect, an error will be thrown).
+
+### `server.resources: Resources`
+
+This provides access to the map of all registered resources. This is the central registry in Harper for registering any resources to be exported for use by REST, MQTT, or other components.
+Components that want to register resources should use the `server.resources.set(name, resource)` method to add to this map. Exported resources can be found by passing a path to `server.resources.getMatch(path)`, which will find any resource that matches the path or the beginning of the path.
+
+#### `server.resources.set(name, resource, exportTypes?)`
+
+Register a resource with the server. For example:
+
+```javascript
+class NewResource extends Resource {
+}
+server.resources.set('NewResource', NewResource);
+// or limit usage:
+server.resources.set('NewResource', NewResource, { rest: true, mqtt: false, 'my-protocol': true });
+```
+
+#### `server.resources.getMatch(path, exportType?)`
+
+Find a resource that matches the path. For example:
+
+```javascript
+server.resources.getMatch('/NewResource/some-id');
+// or specify the export/protocol type, to allow it to be limited:
+server.resources.getMatch('/NewResource/some-id', 'my-protocol');
+```
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/technical-details/reference/graphql.md b/site/versioned_docs/version-4.5/technical-details/reference/graphql.md
new file mode 100644
index 00000000..e9753307
--- /dev/null
+++ b/site/versioned_docs/version-4.5/technical-details/reference/graphql.md
@@ -0,0 +1,248 @@
+---
+title: GraphQL Querying
+---
+
+# GraphQL Querying
+
+Harper supports GraphQL in a variety of ways. It can be used for [defining schemas](../../developers/applications/defining-schemas) and for querying [Resources](./resource).
+
+Get started by setting `graphql: true` in `config.yaml`.
+
+This automatically enables a `/graphql` endpoint that can be used for GraphQL queries.
+
+> Harper's GraphQL component is inspired by the [GraphQL Over HTTP](https://graphql.github.io/graphql-over-http/draft/#) specification; however, it does not fully implement either that specification or the [GraphQL](https://spec.graphql.org/) specification.
+
+Queries can be either `GET` or `POST` requests, and both follow essentially the same request format. `GET` requests must use search parameters, and `POST` requests use the request body.
+
+For example, to request the GraphQL query:
+
+```graphql
+query GetDogs {
+	Dog {
+		id
+		name
+	}
+}
+```
+
+The `GET` request would look like:
+
+```http
+GET /graphql?query=query+GetDogs+%7B+Dog+%7B+id+name+%7D+%7D
+Accept: application/graphql-response+json
+```
+
+And the `POST` request would look like:
+
+```http
+POST /graphql/
+Content-Type: application/json
+Accept: application/graphql-response+json
+
+{
+	"query": "query GetDogs { Dog { id name } }"
+}
+```
+
+> Tip: For the best user experience, include the `Accept: application/graphql-response+json` header in your request. This provides better status codes for errors.
+
+The Harper GraphQL querying system is strictly limited to exported Harper Resources. For many users, this will typically be a table that uses the `@export` directive in its schema. Queries can only specify Harper Resources and their attributes in the selection set. Queries can filter using [arguments](https://graphql.org/learn/queries/#arguments) on the top-level Resource field. Harper provides a short form pattern for simple queries, and a long form pattern based on the [Resource Query API](./resource#query) for more complex queries.
+
+Unlike REST queries, GraphQL queries can specify multiple resources simultaneously:
+
+```graphql
+query GetDogsAndOwners {
+	Dog {
+		id
+		name
+		breed
+	}
+
+	Owner {
+		id
+		name
+		occupation
+	}
+}
+```
+
+This will return all dogs and owners in the database.
+This is equivalent to executing two REST queries:
+
+```http
+GET /Dog/?select(id,name,breed)
+# and
+GET /Owner/?select(id,name,occupation)
+```
+
+### Request Parameters
+
+There are three request parameters for GraphQL queries: `query`, `operationName`, and `variables`.
+
+1. `query` - _Required_ - The string representation of the GraphQL document.
+   1. Limited to [Executable Definitions](https://spec.graphql.org/October2021/#executabledefinition) only.
+   1. i.e. GraphQL [`query`](https://graphql.org/learn/queries/#fields) or `mutation` (coming soon) operations, and [fragments](https://graphql.org/learn/queries/#fragments).
+   1. If a shorthand, unnamed, or single named query is provided, it will be executed by default. Otherwise, if there are multiple queries, the `operationName` parameter must be used.
+1. `operationName` - _Optional_ - The name of the query operation to execute if multiple queries are provided in the `query` parameter.
+1. `variables` - _Optional_ - A map of variable values to be used for the specified query.
+
+### Type Checking
+
+The Harper GraphQL querying system takes many liberties with the GraphQL specification. This extends to how it handles type checking. In general, the querying system does **not** type check. Harper uses the `graphql` parser directly, and then performs a transformation on the resulting AST. We do not control any type checking/casting behavior of the parser, and since the execution step diverges from the spec greatly, the type checking behavior is only loosely defined.
+
+In variable definitions, the querying system will ensure non-null values exist (and error appropriately), but it will not do any type checking of the value itself.
+
+For example, the variable `$name: String!` states that `name` should be a non-null, string value.
+
+- If the request does not contain the `name` variable, an error will be returned.
+- If the request provides `null` for the `name` variable, an error will be returned.
+- If the request provides any non-string value for the `name` variable, i.e. `1`, `true`, `{ foo: "bar" }`, the behavior is undefined and an error may or may not be returned.
+- If the variable definition is changed to include a default value, `$name: String! = "John"`, then when omitted, `"John"` will be used.
+  - If `null` is provided as the variable value, an error will still be returned.
+  - If the default value does not match the type specified (i.e. `$name: String! = 0`), this is also considered undefined behavior. It may or may not fail in a variety of ways.
+- Fragments will generally extend non-specified types, and the querying system will do no validity checking on them. For example, `fragment Fields on Any { ... }` is just as valid as `fragment Fields on MadeUpTypeName { ... }`. See the Fragments section for more details.
+
+The only notable place the querying system will do some level of type analysis is the transformation of arguments into a query.
+
+- Objects will be transformed into properly nested attributes.
+- String and Boolean values are passed through as their AST values.
+- Float and Int values will be parsed using the JavaScript `parseFloat` and `parseInt` methods, respectively.
+- Lists and Enums are not supported.
+
+### Fragments
+
+The querying system loosely supports fragments. Both fragment definitions and inline fragments are supported, and are purely a composition utility. Since this system does very little type checking, the `on Type` part of fragments is entirely pointless.
+Any value can be used for `Type` and it will have the same effect.
+
+For example, in the query
+
+```graphql
+query Get {
+	Dog {
+		...DogFields
+	}
+}
+
+fragment DogFields on Dog {
+	name
+	breed
+}
+```
+
+the `Dog` type in the fragment has no correlation to the `Dog` resource in the query (that correlates to the Harper `Dog` resource).
+
+You can literally specify anything in the fragment and it will behave the same way:
+
+```graphql
+fragment DogFields on Any { ... } # this is recommended
+fragment DogFields on Cat { ... }
+fragment DogFields on Animal { ... }
+fragment DogFields on LiterallyAnything { ... }
+```
+
+As an actual example, fragments should be used for composition:
+
+```graphql
+query Get {
+	Dog {
+		...sharedFields
+		breed
+	}
+	Owner {
+		...sharedFields
+		occupation
+	}
+}
+
+fragment sharedFields on Any {
+	id
+	name
+}
+```
+
+### Short Form Querying
+
+Any attribute can be used as an argument for a query. In this short form, multiple arguments are treated as multiple equality conditions combined with the default `and` operator.
+
+For example, the following query requires an `id` variable to be provided, and the system will search for a `Dog` record matching that id.
+
+```graphql
+query GetDog($id: ID!) {
+	Dog(id: $id) {
+		name
+		breed
+		owner {
+			name
+		}
+	}
+}
+```
+
+And as a properly formed request:
+
+```http
+POST /graphql/
+Content-Type: application/json
+Accept: application/graphql-response+json
+
+{
+	"query": "query GetDog($id: ID!) { Dog(id: $id) { name breed owner { name } } }",
+	"variables": {
+		"id": "0"
+	}
+}
+```
+
+The REST equivalent would be:
+
+```http
+GET /Dog/?id==0&select(name,breed,owner{name})
+# or
+GET /Dog/0?select(name,breed,owner{name})
+```
+
+Short form queries can handle nested attributes as well.
+
+For example, to return all dogs who have an owner with the name `"John"`:
+
+```graphql
+query GetDog {
+	Dog(owner: { name: "John" }) {
+		name
+		breed
+		owner {
+			name
+		}
+	}
+}
+```
+
+This would be equivalent to:
+
+```http
+GET /Dog/?owner.name==John&select(name,breed,owner{name})
+```
+
+And finally, we can put all of these together to create semi-complex, equality-based queries!
+
+The following query has two variables and will return all dogs who have the specified name as well as the specified owner name.
+
+```graphql
+query GetDog($dogName: String!, $ownerName: String!) {
+	Dog(name: $dogName, owner: { name: $ownerName }) {
+		name
+		breed
+		owner {
+			name
+		}
+	}
+}
+```
+
+### Long Form Querying
+
+> Coming soon!
+
+### Mutations
+
+> Coming soon!
+
+### Subscriptions
+
+> Coming soon!
+
+### Directives
+
+> Coming soon!
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/technical-details/reference/headers.md b/site/versioned_docs/version-4.5/technical-details/reference/headers.md
new file mode 100644
index 00000000..0301b152
--- /dev/null
+++ b/site/versioned_docs/version-4.5/technical-details/reference/headers.md
@@ -0,0 +1,12 @@
+---
+title: Harper Headers
+---
+
+# Harper Headers
+
+All Harper API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all Harper API responses:
+
+| Key           | Example Value    | Description                                                                                                                                               |
+| ------------- | ---------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| server-timing | db;dur=7.165     | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools. |
+| content-type  | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header.                         |
diff --git a/site/versioned_docs/version-4.5/technical-details/reference/index.md b/site/versioned_docs/version-4.5/technical-details/reference/index.md
new file mode 100644
index 00000000..8b2629e5
--- /dev/null
+++ b/site/versioned_docs/version-4.5/technical-details/reference/index.md
@@ -0,0 +1,16 @@
+---
+title: Reference
+---
+
+# Reference
+
+This section contains technical details and reference materials for Harper.
+
+* [Resource API](./resource)
+* [Transactions](./transactions)
+* [Storage Algorithm](./storage-algorithm)
+* [Dynamic Schema](./dynamic-schema)
+* [Headers](./headers)
+* [Limitations](./limits)
+* Content Types
+* [Data Types](./data-types)
diff --git a/site/versioned_docs/version-4.5/technical-details/reference/limits.md b/site/versioned_docs/version-4.5/technical-details/reference/limits.md
new file mode 100644
index 00000000..9e343887
--- /dev/null
+++ b/site/versioned_docs/version-4.5/technical-details/reference/limits.md
@@ -0,0 +1,36 @@
+---
+title: Harper Limits
+---
+
+# Harper Limits
+
+This document outlines limitations of Harper.
+
+## Database Naming Restrictions
+
+**Case Sensitivity**
+
+Harper database metadata (database names, table names, and attribute/column names) is case sensitive, meaning that databases, tables, and attributes can differ only by the case of their characters.
+
+**Restrictions on Database Metadata Names**
+
+Harper database metadata (database names, table names, and attribute names) cannot contain the following UTF-8 characters:
+
+```
+/`¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
+```
+
+Additionally, they cannot contain the first 31 non-printing characters. Spaces are allowed, but not recommended as best practice. The regular expression used to verify a name is valid is:
+
+```
+^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$
+```
+
+## Table Limitations
+
+**Attribute Maximum**
+
+Harper limits the number of total indexed attributes across tables (including the primary key of each table) to 10,000 per database.
+
+## Primary Keys
+
+The maximum length of a primary key is 1978 bytes or 659 characters (whichever is shorter).
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.5/technical-details/reference/resource.md b/site/versioned_docs/version-4.5/technical-details/reference/resource.md
new file mode 100644
index 00000000..2d3b781f
--- /dev/null
+++ b/site/versioned_docs/version-4.5/technical-details/reference/resource.md
@@ -0,0 +1,738 @@
+---
+title: Resource Class
+---
+
+# Resource Class
+
+## Resource Class
+
+The Resource class is designed to provide a unified API for modeling different data resources within Harper. Database/table data can be accessed through the Resource API. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users.
+
+Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, that you can interact with through various methods or queries.
+Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. A Resource instance holds the primary key/identifier, context information, and any pending updates to the record, so any instance methods can act on the record and have full access to this information during execution. Therefore, there are distinct resource instances created for every record or query that is accessed, and the instance methods are used for interaction with the data.
+
+Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. The general rule for how to interact with resources is:
+
+* If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods for the initial access or write. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`.
+  * You can subsequently use the instance methods on the returned resource instance to perform additional actions on the record.
+* If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods.
+
+The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to, and be used in a similar way to, how you would interact with a RESTful API.
+
+The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs, which were designed with single-server interactions in mind; semantics that attempt to guarantee no-existing-record or overwrite-only behavior require locks that don't scale well in a distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed, eventually consistent database. You can generally think of CRUD operations mapping to REST operations like this:
+
+* Read - `get`
+* Create with a known primary key - `put`
+* Create with a generated primary key - `post`/`create`
+* Update (Full) - `put`
+* Update (Partial) - `patch`
+* Delete - `delete`
+
+The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URLs, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped to a Resource instance where the instance's ID will be `3492`, and interactions will use the instance methods like `get()`, `put()`, and `post()`. Using the root path (`/MyResource/`) will map to a Resource instance with an ID of `null`, and this represents the collection of all the records in the resource or table.
+
+You can create classes that extend `Resource` to define your own data sources, typically to interface with external data sources (the `Resource` base class is available as a global variable in the Harper JS environment).
+In doing this, you will generally be extending and providing implementations for the instance methods below. For example:
+
+```javascript
+export class MyExternalData extends Resource {
+	async get() {
+		// fetch data from an external source, using our id
+		let response = await this.fetch(this.id);
+		// do something with the response
+	}
+	put(data) {
+		// send the data into the external source
+	}
+	delete() {
+		// delete an entity in the external data source
+	}
+	subscribe(options) {
+		// if the external data source is capable of real-time notification of changes, can subscribe
+	}
+}
+// we can export this class from resources.js as our own endpoint, or use this as the source for
+// a Harper table to store and cache the data coming from this data source:
+tables.MyCache.sourcedFrom(MyExternalData);
+```
+
+You can also extend table classes in the same way, overriding the instance methods for custom functionality. The `tables` object is a global variable in the Harper JavaScript environment, along with `Resource`:
+
+```javascript
+export class MyTable extends tables.MyTable {
+	get() {
+		// we can add properties or change properties before returning data:
+		this.newProperty = 'newValue';
+		this.existingProperty = 44;
+		return super.get(); // returns the record, modified with the changes above
+	}
+	put(data) {
+		// can change data any way we want
+		super.put(data);
+	}
+	delete() {
+		super.delete();
+	}
+	post(data) {
+		// providing a post handler (for HTTP POST requests) is a common way to create additional
+		// actions that aren't well described with just PUT or DELETE
+	}
+}
+```
+
+Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice.
+
+All Resource methods that are called from HTTP methods may directly return data, or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code) to explicitly return specific headers and a status code.
+
+## Global Variables
+
+### `tables`
+
+This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API.
+
+### `databases`
+
+This is an object with all the databases that have been defined in Harper (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.
+
+### `Resource`
+
+This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables.
+
+### `server`
+
+This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../../developers/components/reference#extensions).
+
+### `transaction`
+
+This provides a function for starting transactions. See the transactions section below for more information.
+
+### `contentTypes`
+
+This provides an interface for defining new content type handlers.
+See the content type extensions documentation for more information.
+
+### TypeScript Support
+
+While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them. This can be done by setting up a package link to the main Harper package in your app:
+
+```bash
+# you may need to go to your harper directory and set it up as a link first
+npm link harperdb
+```
+
+And then you can import any of the main Harper APIs you will use, and your IDE should understand the full typings associated with them:
+
+```javascript
+import { databases, tables, Resource } from 'harperdb';
+```
+
+## Resource Class (Instance) Methods
+
+### Properties/attributes declared in schema
+
+Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances.
+
+### `get(queryOrProperty?): Resource|AsyncIterable`
+
+This is called to return the record or data for this resource, and is called by HTTP GET requests. This may optionally be called with a `query` object to specify that a query should be performed, or a string to indicate that the specified property value should be returned. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. The default `get` method (`super.get()`) returns the current record as a plain object.
+
+The query object can be used to access any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
+
+```javascript
+get(query) {
+	// note that query will only exist (as an object) if there is a query string
+	let param1 = query?.get?.('param1'); // returns 'value'
+	let id = this.getId(); // returns 'some-id'
+	...
+}
+```
+
+If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return `this` instance of the resource. If `get` is called on a collection (`/Table/?name=value`), the default action is to `search` and return an AsyncIterable of results.
+
+It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.get(query)` performs a `get` on this specific record/resource, not on the whole table. If you wish to access a _different_ record, you should use the static `get` method on the table class, like `Table.get(otherId, context)`.
+
+### `search(query: Query): AsyncIterable`
+
+This performs a query on this resource, searching for records that are descendants. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`), it searches through all records in the table. However, if you call search from an instance with a specific ID like `1` from a path like `Table/1`, it will only return records that are descendants of that record, like `[1, 1]` (path of `Table/1/1`) and `[1, 2]` (path of `Table/1/2`). If you want to do a standard search of the table, make sure you call the static method like `Table.search(...)`. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an AsyncIterable of results. The query object can be used to specify the desired query.
+
+### `getId(): string|number|Array`
+
+Returns the primary key value for this resource.
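+
+For example, here is a minimal sketch of using `getId()` inside an overridden instance method; the `Dog` table is a hypothetical table in the default database:
+
+```javascript
+// a minimal sketch, assuming a hypothetical 'Dog' table in the default database
+export class Dog extends tables.Dog {
+	async get(query) {
+		// getId() returns this record's primary key (a string, number, or array for multipart keys)
+		logger.info(`Retrieving dog ${this.getId()}`);
+		return super.get(query); // fall through to the default record retrieval
+	}
+}
+```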
+
+### `put(data: object, query?: Query): Resource|void|Response`
+
+This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance.
+
+It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.put(data)` updates this specific record/resource, not other records in the table. If you wish to update a _different_ record, you should use the static `put` method on the table class, like `Table.put(data, context)`.
+
+The `query` argument is used to represent any additional query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
+
+```javascript
+put(data, query) {
+	let param1 = query?.get?.('param1'); // returns 'value'
+	...
+}
+```
+
+### `patch(data: object, query?: Query): Resource|void|Response`
+
+This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `query` argument is used to represent any additional query parameters that were included.
+
+### `update(data: object, fullUpdate: boolean?)`
+
+This is called by the default `put` and `patch` handlers to update a record. `put` calls it with `fullUpdate` set to `true` to indicate a full record replacement (`patch` calls it with the second argument as `false`). Any additional property changes that are made before the transaction commits will also be persisted.
+
+### `delete(queryOrProperty?): Resource|void|Response`
+
+This will delete this record or resource, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete()`) deletes the record from the table as part of the current transaction.
+
+### `publish(message): Resource|void|Response`
+
+This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(message)`) records the published message as part of the current transaction; this will not change the data in the record, but will notify any subscribers to the record/topic.
+
+### `post(data: object, query?: Query): Resource|void|Response`
+
+This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally, `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `query` argument is used to represent any additional query parameters that were included.
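+
+As an illustration, here is a minimal sketch of a custom `post` handler; the `Dog` table and its `votes` attribute are hypothetical, and `addTo` (CRDT incrementation) is described below:
+
+```javascript
+// a minimal sketch, assuming a hypothetical 'Dog' table with a numeric 'votes' attribute
+export class Dog extends tables.Dog {
+	post(data) {
+		// handles a POST to /Dog/some-id with a body like { "vote": 1 }
+		if (data.vote) {
+			this.addTo('votes', data.vote); // CRDT increment, see addTo() below
+		}
+	}
+}
+```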
+
+### `invalidate()`
+
+This method is available on tables. This will invalidate the current record in the table. This can be used with a caching table and is used to indicate that the source data has changed and the record needs to be reloaded when next accessed.
+
+### `subscribe(subscriptionRequest: SubscriptionRequest): Promise`
+
+This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource.
+
+The returned promise resolves to a Subscription object, which is an `AsyncIterable` that you can iterate through with a `for await` loop. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time or previous count, or there is a message for the current or "retained" record, these may be immediately returned).
+
+The `SubscriptionRequest` object supports the following properties (all optional):
+
+* `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id: 'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`).
+* `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
+* `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`.
+* `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.
+
+### `connect(incomingMessages?: AsyncIterable, query?: Query): AsyncIterable`
+
+This is called when a connection is received through WebSockets or Server-Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client.
+
+### `set(property, value)`
+
+This will assign the provided value to the designated property in the resource's record. During a write operation, this will indicate that the record has changed and the changes will be saved during commit. During a read operation, this will modify the copy of the record that will be serialized during serialization (converted to the output format of JSON, MessagePack, etc.).
+
+### `allowCreate(user: any, data: Promise, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to create the current resource. This is called as part of external incoming requests (HTTP). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's insert permission to the table. The allow method may be asynchronous and return a promise that resolves to a boolean, and may await the `data` promise to determine if the data is valid for creation.
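+
+For example, a minimal sketch of overriding an authorization check; the table and the `editor` username are hypothetical:
+
+```javascript
+// a minimal sketch, assuming a hypothetical 'Dog' table
+export class Dog extends tables.Dog {
+	allowCreate(user, data, context) {
+		// allow a (hypothetical) 'editor' user to create records;
+		// everyone else falls back to the default role-based check
+		return user?.username === 'editor' || super.allowCreate(user, data, context);
+	}
+}
+```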
+
+### `allowRead(user: any, query: Map | void, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to read from the current resource. This is called as part of external incoming requests (HTTP GET). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's read permission to the table. The allow method may be asynchronous and return a promise that resolves to a boolean.
+
+### `allowUpdate(user: any, data: Promise, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to update the current resource. This is called as part of external incoming requests (HTTP PUT). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's update permission to the table. The allow method may be asynchronous and return a promise that resolves to a boolean, and may await the `data` promise to determine if the data is valid for the update.
+
+### `allowDelete(user: any, query: Map | void, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to delete the current resource. This is called as part of external incoming requests (HTTP DELETE). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's delete permission to the table. The allow method may be asynchronous and return a promise that resolves to a boolean.
+
+### `addTo(property, value)`
+
+This adds the provided value to the specified property using conflict-free replicated data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values.
+
+### `getUpdatedTime(): number`
+
+This returns the last updated time of the resource (the timestamp of the last commit). This is returned as milliseconds from epoch.
+
+### `wasLoadedFromSource(): boolean`
+
+Indicates if the record had been loaded from the source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or had to wait on an in-flight request from the source to finish).
+
+### `getContext(): Context`
+
+Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action.
+
+#### `Context`
+
+The `Context` object has the following (potential) properties:
+
+* `user` - This is the user object, which includes information about the username, role, and authorizations.
+* `transaction` - The current transaction.
+
+If the current method was triggered by an HTTP request, the following properties are available:
+
+* `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed, and will inform the response's `ETag` (or `Last-Modified`) header.
+This can be updated by application code if it knows that a modification should cause this timestamp to be updated.
+
+When a resource gets a request through HTTP, the request object is the context, which has the following properties:
+
+* `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and include the query string).
+* `method` - The method of the HTTP request.
+* `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
+* `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
+* `pathname` - This provides the path part of the URL (no query string).
+* `host` - This provides the host name of the request (from the `Host` header).
+* `ip` - This provides the IP address of the client that made the request.
+* `body` - This is the request body as a raw Node.js Readable stream, if there is a request body.
+* `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body, like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body, like `DELETE`.)
+
+When a resource is accessed as a data source:
+
+* `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and relying on this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example, queries, operations, studio, etc.), so make sure you are not relying on specific request context information.
+
+### `operation(operationObject: Object, authorize?: boolean): Promise`
+
+This method is available on tables and will execute a Harper operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../developers/operations-api/) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized).
+
+### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean`
+
+For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value.
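+
+For example, a minimal sketch; the caching table name and the 60-second window are hypothetical:
+
+```javascript
+// a minimal sketch, assuming a hypothetical caching table 'MyCache':
+// serve stale entries for up to 60 seconds past expiration, then block on revalidation
+export class MyCache extends tables.MyCache {
+	allowStaleWhileRevalidate(entry, id) {
+		return Date.now() - entry.expiresAt < 60_000;
+	}
+}
+```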
+
+## Resource Static Methods and Properties
+
+The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way of interacting with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on.
+
+The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a static `search()` method for specifically handling searching a table with query parameters. By default, the Resource static methods create an instance bound to the record specified by the arguments and call the instance methods. Again, static methods are generally the preferred way to interact with resources and call them from application code. These methods are available on all user Resource classes and tables.
+
+### `get(id: Id, context?: Resource|Context)`
+
+This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post, you could do:
+
+```javascript
+const { MyTable, Comment } = tables;
+...
+// in class:
+	async get() {
+		for (let commentId of this.commentIds) {
+			let comment = await Comment.get(commentId, this);
+			// now you can do something with the comment record
+		}
+	}
+```
+
+Type definition for `Id`:
+
+```typescript
+type Id = string | number | Array<string | number>;
+```
+
+### `get(query: Query, context?: Resource|Context)`
+
+This can be used to retrieve a resource instance by a query. The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`:
+
+```javascript
+MyTable.get({ id: 34, select: ['name', 'age'] });
+```
+
+This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above.
+
+### `put(id: Id, record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same `id` (primary key).
+
+### `put(record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same primary key provided in the record. If your table doesn't have a primary key attribute, you will need to use the method with the `id` argument. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `create(record: object, context?: Resource|Context): Promise`
+
+This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not include a primary key, and the generated primary key will be used. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
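+
+Putting these together, here is a minimal usage sketch; the `Dog` table and its attributes are hypothetical:
+
+```javascript
+// a minimal sketch, assuming a hypothetical 'Dog' table (primary key attribute 'id')
+const { Dog } = tables;

+async function example() {
+	// create with a generated primary key
+	const newDog = await Dog.create({ name: 'Penny', breed: 'Corgi' });
+	const newId = newDog.getId();

+	// create or fully replace the record with primary key 1
+	await Dog.put({ id: 1, name: 'Harper', breed: 'Husky' });

+	// retrieve by primary key
+	const record = await Dog.get(1);
+}
+```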
+
+### `post(id: Id, data: object, context?: Resource|Context): Promise`
+
+### `post(data: object, context?: Resource|Context): Promise`
+
+This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged.
+
+### `patch(recordUpdate: object, context?: Resource|Context): Promise`
+
+### `patch(id: Id, recordUpdate: object, context?: Resource|Context): Promise`
+
+This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `delete(id: Id, context?: Resource|Context): Promise`
+
+Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `publish(message: object, context?: Resource|Context): Promise`
+
+### `publish(topic: Id, message: object, context?: Resource|Context): Promise`
+
+Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise`
+
+Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this.
+
+### `search(query: Query, context?: Resource|Context): AsyncIterable`
+
+This will perform a query on this table or collection. The query parameter can be used to specify the desired query.
+
+### `setComputedAttribute(name: string, computeFunction: (record: object) => any)`
+
+This will define the function to use for a computed attribute. To use this, the attribute must be defined in the schema as a computed attribute. The `computeFunction` will be called with the record as an argument and should return the computed value for the attribute. For example:
+
+```javascript
+MyTable.setComputedAttribute('computedAttribute', (record) => {
+	return record.attribute1 + record.attribute2;
+});
+```
+
+For a schema like:
+
+```graphql
+type MyTable @table {
+	id: ID @primaryKey
+	attribute1: Int
+	attribute2: Int
+	computedAttribute: Int @computed
+}
+```
+
+See the [schema documentation](../../developers/applications/defining-schemas) for more information on computed attributes.
+
+### `primaryKey`
+
+This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example:
+
+```javascript
+let record34 = await Table.get(34);
+record34[Table.primaryKey]; // -> 34
+```
+
+There are additional methods that are only available on table classes (which are a type of resource).
+
+### `Table.sourcedFrom(Resource, options)`
+
+This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`), and the result will be cached/stored in the table.
All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries. It supports:
+
+* `expiration` - Default expiration time for records in seconds.
+* `eviction` - Eviction time for records in seconds.
+* `scanInterval` - Time period for scanning the table for records to evict.
+
+If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data).
+
+### `directURLMapping`
+This property can be set to force the direct URL request target to be mapped to the resource primary key. Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example:
+```javascript
+export class MyTable extends tables.MyTable {
+	static directURLMapping = true;
+}
+```
+```http
+GET /MyTable/test?foo=bar
+```
+This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource.
+
+### `getRecordCount({ exactCount: boolean })`
+This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. If this was not an exact count, it will also include an `estimatedRange` array with the estimated range of the count.
+
+### `parsePath(path, context, query)`
+
+This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will parse `.property` suffixes for accessing properties and specifying the preferred content type in the URL (and for older tables it will convert a multi-segment path to a multipart array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simpler path-to-id preservation:
+
+```javascript
+	static parsePath(path) {
+		return path; // return the path as the id
+	}
+```
+
+### `isCollection(resource: Resource): boolean`
+
+This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity.
+
+### Context and Transactions
+
+Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation.
+
+This also allows timestamps that are accessed during resolution to be used to determine the overall last updated timestamp, which informs the response header timestamps and facilitates accurate client-side caching.
The context also maintains user, session, and request metadata so that contextual request information (like headers) can be accessed, any writes are properly attributed to the correct user, and any additional security checks can be applied to the user.
+
+When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the context argument (the second argument) to the static methods.
+
+For example, suppose we have a method to post a comment on a blog post: when this happens, we want to add the comment to a separate comment table and also update the array of comment IDs on the blog record. We might do this:
+
+```javascript
+const { Comment } = tables;
+
+export class BlogPost extends tables.BlogPost {
+	async post(comment) {
+		// add a comment record to the comment table, using this resource as the context
+		await Comment.put(comment, this);
+		this.comments.push(comment.id); // add the id for the record to our array of comment ids
+		// Both of these actions will be committed atomically as part of the same transaction
+	}
+}
+```
+
+Please see the [transaction documentation](./transactions) for more information on how transactions work in Harper.
+
+### Query
+
+The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional:
+
+#### `conditions`
+
+This is an array of objects that specify the conditions used to match records (if conditions are omitted or the array is empty, this is a search for everything in the table). Each condition object can have the following properties:
+
+* `attribute`: Name of the property/attribute to match on.
+* `value`: The value to match.
+* `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater\_than", "greater\_than\_equal", "less\_than", "less\_than\_equal", "starts\_with", "contains", "ends\_with", "between", and "not\_equal".
+* `conditions`: An array of nested conditions, which follows the same structure as above.
+* `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`); this is optional and defaults to `and`.
+
+For example, a more complex query might look like:
+
+```javascript
+Table.search({ conditions: [
+	{ attribute: 'price', comparator: 'less_than', value: 100 },
+	{ operator: 'or', conditions: [
+		{ attribute: 'rating', comparator: 'greater_than', value: 4 },
+		{ attribute: 'featured', value: true }
+	]}
+]});
+```
+
+**Chained Attributes/Properties**
+
+Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../developers/applications/defining-schemas), see the [REST documentation](../../developers/rest) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references.
For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:
+
+```javascript
+Product.search({ conditions: [
+	{ attribute: ['brand', 'name'], value: 'Harper' }
+]});
+```
+
+This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as in nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in a referenced/joined table.
+
+#### `operator`
+
+Specifies if the conditions should be applied as an `"and"` (records must match all conditions) or as an `"or"` (records must match at least one condition). This is optional and defaults to `"and"`.
+
+#### `limit`
+
+This specifies the limit of the number of records that should be returned from the query.
+
+#### `offset`
+
+This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records.
+
+#### `select`
+
+This specifies the specific properties that should be included in each record that is returned. This can be an array, to specify a set of properties that should be included in the returned objects. The array can also have an `asArray = true` property (`select.asArray = true`), in which case the query results will return a set of arrays of the values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and a `select` array itself that specifies properties that should be returned by the referenced sub-object or related record. For example, a `select` can be defined:
+
+```javascript
+Table.search({ select: [ 'name', 'age' ], conditions: ...})
+```
+
+Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects:
+
+```javascript
+Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...})
+```
+
+The select properties can also include certain special properties:
+
+* `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
+* `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).
+
+Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects:
+
+```javascript
+Table.search({ select: 'id', conditions: ...})
+```
+
+#### `sort`
+
+This defines the sort order, and should be an object that can have the following properties (a sketch follows the list):
+
+* `attribute`: The attribute to sort on.
+* `descending`: If true, will sort in descending order (optional and defaults to `false`).
+* `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
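+
+For example, a minimal sketch of a tiered sort, assuming hypothetical `price` and `rating` attributes, that orders by price ascending and breaks ties by rating descending:
+
+```javascript
+Table.search({
+	conditions: [{ attribute: 'price', comparator: 'less_than', value: 100 }],
+	sort: {
+		attribute: 'price',
+		next: { attribute: 'rating', descending: true } // tie-breaker
+	}
+});
+```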
+
+#### `explain`
+
+This will return the conditions re-ordered as Harper will execute them. Harper will estimate the number of matching records for each condition and apply the narrowest condition first.
+
+#### `enforceExecutionOrder`
+
+This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them.
+
+The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index).
+
+For example, we could do a query like:
+
+```javascript
+let { Product } = tables;
+let results = Product.search({
+	conditions: [
+		{ attribute: 'rating', value: 4.5, comparator: 'greater_than' },
+		{ attribute: 'price', value: 100, comparator: 'less_than' },
+	],
+	offset: 20,
+	limit: 10,
+	select: ['id', 'name', 'price', 'rating'],
+	sort: { attribute: 'price' }
+});
+for await (let record of results) {
+	// iterate through each record in the query results
+}
+```
+
+`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will leave a long-lived read transaction open, which can degrade performance (including write performance), and the transaction may eventually be aborted.
+
+### Interacting with the Resource Data Model
+
+When extending or interacting with table resources, a resource instance that is retrieved and instantiated will be loaded with the record data from its table. You can interact with this record through the resource instance. For any properties that have been defined in the table's schema, you can directly access or modify properties through standard property syntax.
For example, let's say we defined a product schema:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+}
+```
+
+If we have extended this table class with our own `get()`, we can interact with any of these specified attributes/properties:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let name = this.name; // this is the name of the current product
+		let rating = this.rating; // this is the rating of the current product
+		this.rating = 3; // we can also modify the rating for the current instance
+		// (with a get this won't be saved by default, but will be used when serialized)
+		return super.get(query);
+	}
+}
+```
+
+Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:
+
+```javascript
+let product1 = await Product.get(1);
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+product1.rating = 3; // modify the rating for this instance (this will not be saved without a call to update())
+```
+
+If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definitions for direct property access. Instead, we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method:
+
+```javascript
+let product1 = await Product.get(1);
+let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+product1.set('newProperty', 'some value'); // we can assign any properties we want with set
+```
+
+And likewise, we can do this in an instance method, although you will probably want to use `super.get()`/`super.set()` so you don't have to write extra logic to avoid recursion:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+		super.set('newProperty', 'some value'); // we can assign any properties we want with set
+	}
+}
+```
+
+Note that you may also need to use `get`/`set` for properties that conflict with existing method names. For example, if your schema defines an attribute called `getId` (not recommended), you would need to access that property through `get('getId')` and `set('getId', value)`.
+
+If you want to save the changes you make, you can call the `update()` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.rating = 3;
+product1.set('newProperty', 'some value');
+product1.update(); // save both of these property changes
+```
+
+Updates are automatically saved inside modifying methods like `put` and `post`:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		this.name = data.name;
+		this.set('description', data.description);
+		// both of these changes will be saved automatically as this transaction commits
+	}
+}
+```
+
+We can also interact with properties in nested objects and arrays, following the same patterns.
For example, we could define more complex types on our product:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+	brand: Brand
+	variations: [Variation]
+}
+type Brand {
+	name: String
+}
+type Variation {
+	name: String
+	price: Float
+}
+```
+
+We can interact with these nested properties:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		let brandName = this.brand.name;
+		let firstVariationPrice = this.variations[0].price;
+		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but can still try to access the property
+		// make some changes
+		this.variations.splice(0, 1); // remove first variation
+		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		this.brand.name = 'new brand name';
+		// all of these changes will be saved
+	}
+}
+```
+
+If you need to delete a property, you can do so with the `delete` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.delete('additionalInformation');
+product1.update();
+```
+
+You can also get a "plain" object representation of a resource instance by calling `toJSON`, which will return a simple frozen object with all the properties (whether or not they are defined in the schema) as direct normal properties (note that this object can _not_ be modified; it is frozen since it belongs to a cache):
+
+```javascript
+let product1 = await Product.get(1);
+let plainObject = product1.toJSON();
+for (let key in plainObject) {
+	// can iterate through the properties of this record
+}
+```
+
+## Response Object
+
+The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response` style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. For example, you could return a redirect response like:
+
+```javascript
+return { status: 302, headers: { Location: '/new-location' } };
+```
+
+If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header:
+
+```javascript
+return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
+```
+
+### Throwing Errors
+
+You may throw errors (and leave them uncaught) from the response methods and these should be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default, the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned.
For example:
+
+```javascript
+if (notAuthorized()) {
+	let error = new Error('You are not authorized to access this');
+	error.statusCode = 403;
+	throw error;
+}
+``` diff --git a/site/versioned_docs/version-4.5/technical-details/reference/storage-algorithm.md b/site/versioned_docs/version-4.5/technical-details/reference/storage-algorithm.md new file mode 100644 index 00000000..c755adb2 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/storage-algorithm.md @@ -0,0 +1,27 @@
+---
+title: Storage Algorithm
+---
+
+# Storage Algorithm
+
+The Harper storage algorithm is fundamental to the Harper core functionality, enabling the [Dynamic Schema](./dynamic-schema) and all other user-facing functionality. Harper is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document will provide additional details on how data is stored within Harper.
+
+## Query Language Agnostic
+
+The Harper storage algorithm was designed to abstract the data storage from any individual query language. Harper currently supports both SQL and NoSQL on top of this storage algorithm, with the ability to add additional query languages in the future. This means data can be inserted via NoSQL and read via SQL while hitting the same underlying data storage.
+
+## ACID Compliant
+
+Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, Harper offers ACID compliance independently on each node. Readers and writers operate independently of each other, meaning readers don’t block writers and writers don’t block readers. Each Harper table has a single writer process, avoiding deadlocks and assuring that writes are executed in the order in which they were received. Harper tables can have multiple reader processes operating at the same time for consistent, high-scale reads.
+
+## Universally Indexed
+
+All top level attributes are automatically indexed immediately upon ingestion. The [Harper Dynamic Schema](./dynamic-schema) reflexively creates both the attribute and the index as new schema metadata comes in. Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
+
+## Additional LMDB Benefits
+
+Harper inherits both functional and performance benefits by implementing LMDB as the underlying key-value store. Data is memory-mapped, which enables quick data access without data duplication. All writers are fully serialized, making writes deadlock-free. LMDB is built to maximize operating system features and functionality, fully exploiting buffer cache and built to run in CPU cache. To learn more about LMDB, visit their documentation.
+
+## Harper Indexing Example (Single Table)
+
+![](/img/v4.5/reference/HarperDB-3.0-Storage-Algorithm.png.webp) diff --git a/site/versioned_docs/version-4.5/technical-details/reference/transactions.md b/site/versioned_docs/version-4.5/technical-details/reference/transactions.md new file mode 100644 index 00000000..8c712122 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/reference/transactions.md @@ -0,0 +1,40 @@
+---
+title: Transactions
+---
+
+# Transactions
+
+Transactions are an important part of robust handling of data in data-driven applications. Harper provides ACID-compliant support for transactions, allowing for guaranteed atomic, consistent, and isolated data handling within transactions, with durability guarantees on commit. Understanding how transactions are tracked and behave is important for properly leveraging transactional support in Harper. For most operations this is very intuitive: each HTTP request is executed in a transaction, so when multiple actions are executed in a single request, they are normally automatically included in the same transaction.
+
+Transactions span a database. Once a read snapshot is started, it is an atomic snapshot of all the tables in a database, and writes that span multiple tables in the database will all be committed atomically together (no writes in one table will be visible before writes in another table in the same database). If a transaction is used to access or write data in multiple databases, there will actually be a separate database transaction used for each database, and there is no guarantee of atomicity between separate transactions in separate databases. This can be an important consideration when deciding if and how tables should be organized into different databases.
+
+Because Harper is designed to be a low-latency distributed database, locks are avoided in data handling, and transactions do not lock data within the transaction. When a transaction starts, it will provide a read snapshot of the database for any retrievals or queries, which means all reads will be performed on a single version of the database isolated from any other writes that are concurrently taking place. And within a transaction all writes are aggregated and atomically written on commit. These writes are all isolated (from other transactions) until committed, and all become visible atomically. However, because transactions are non-locking, it is possible that writes from other transactions may occur between when reads are performed and when the writes are committed (at which point the last write will win for any records that have been written concurrently). Support for locks in transactions is planned for a future release.
+
+Transactions can also be explicitly started using the `transaction` global function that is provided in the Harper environment:
+
+## `transaction(context?, callback: (transaction) => any): Promise`
+
+This executes the callback in a transaction, providing a context that can be used for any resource methods that are called. This returns a promise for when the transaction has been committed. The callback itself may be asynchronous (return a promise), allowing for asynchronous activity within the transaction. This is useful for starting a transaction when your code is not already running within a transaction (in an HTTP request handler, a transaction will typically already be started).
For example, if we wanted to run an action on a timer that periodically loads data, we could ensure that the data is loaded in single transactions like this (note that Harper is multi-threaded, and if we run a timer-based job, we very likely want it to run in only one thread):
+
+```javascript
+import { tables } from 'harperdb';
+import { isMainThread } from 'worker_threads';
+const { MyTable } = tables;
+if (isMainThread) // only on main thread
+	setInterval(async () => {
+		let someData = await (await fetch(... some URL ...)).json();
+		transaction(async (txn) => {
+			for (let item of someData) {
+				await MyTable.put(item, txn);
+			}
+		});
+	}, 3600000); // every hour
+```
+
+You can provide your own context object for the transaction to attach to. If you call `transaction` with a context that already has a transaction started, it will simply use the current transaction, execute the callback, and immediately return (this can be useful for ensuring that a transaction has started).
+
+Once the transaction callback is completed (for non-nested transaction calls), the transaction will commit, and if the callback throws an error, the transaction will abort. The callback is called with the `transaction` object, which also provides the following methods and property:
+
+* `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves.
+* `abort(): void` - Aborts the current transaction and resets it.
+* `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database.
+* `timestamp: number` - This is the timestamp associated with the current transaction. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/index.md b/site/versioned_docs/version-4.5/technical-details/release-notes/index.md new file mode 100644 index 00000000..3ca46792 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/index.md @@ -0,0 +1,267 @@
+---
+title: Release Notes
+---
+
+# Release Notes
+
+### Current Release
+
+[Meet Tucker](./v4-tucker/tucker) Our 4th Release Pup
+
+[4.5.16 Tucker](./v4-tucker/4.5.16)
+
+[4.5.15 Tucker](./v4-tucker/4.5.15)
+
+[4.5.14 Tucker](./v4-tucker/4.5.14)
+
+[4.5.13 Tucker](./v4-tucker/4.5.13)
+
+[4.5.12 Tucker](./v4-tucker/4.5.12)
+
+[4.5.11 Tucker](./v4-tucker/4.5.11)
+
+[4.5.10 Tucker](./v4-tucker/4.5.10)
+
+[4.5.9 Tucker](./v4-tucker/4.5.9)
+
+[4.5.8 Tucker](./v4-tucker/4.5.8)
+
+[4.5.7 Tucker](./v4-tucker/4.5.7)
+
+[4.5.6 Tucker](./v4-tucker/4.5.6)
+
+[4.5.5 Tucker](./v4-tucker/4.5.5)
+
+[4.5.4 Tucker](./v4-tucker/4.5.4)
+
+[4.5.3 Tucker](./v4-tucker/4.5.3)
+
+[4.5.2 Tucker](./v4-tucker/4.5.2)
+
+[4.5.1 Tucker](./v4-tucker/4.5.1)
+
+[4.5.0 Tucker](./v4-tucker/4.5.0)
+
+[4.4.24 Tucker](./v4-tucker/4.4.24)
+
+[4.4.23 Tucker](./v4-tucker/4.4.23)
+
+[4.4.22 Tucker](./v4-tucker/4.4.22)
+
+[4.4.21 Tucker](./v4-tucker/4.4.21)
+
+[4.4.20 Tucker](./v4-tucker/4.4.20)
+
+[4.4.19 Tucker](./v4-tucker/4.4.19)
+
+[4.4.18 Tucker](./v4-tucker/4.4.18)
+
+[4.4.17 Tucker](./v4-tucker/4.4.17)
+
+[4.4.16 Tucker](./v4-tucker/4.4.16)
+
+[4.4.15 Tucker](./v4-tucker/4.4.15)
+
+[4.4.14 Tucker](./v4-tucker/4.4.14)
+
+[4.4.13 Tucker](./v4-tucker/4.4.13)
+
+[4.4.12 Tucker](./v4-tucker/4.4.12)
+
+[4.4.11 Tucker](./v4-tucker/4.4.11)
+
+[4.4.10 Tucker](./v4-tucker/4.4.10)
+
+[4.4.9 Tucker](./v4-tucker/4.4.9)
+
+[4.4.8 Tucker](./v4-tucker/4.4.8)
+
+[4.4.7 Tucker](./v4-tucker/4.4.7)
+
+[4.4.6
Tucker](./v4-tucker/4.4.6)
+
+[4.4.5 Tucker](./v4-tucker/4.4.5)
+
+[4.4.4 Tucker](./v4-tucker/4.4.4)
+
+[4.4.3 Tucker](./v4-tucker/4.4.3)
+
+[4.4.2 Tucker](./v4-tucker/4.4.2)
+
+[4.4.1 Tucker](./v4-tucker/4.4.1)
+
+[4.4.0 Tucker](./v4-tucker/4.4.0)
+
+[4.3.38 Tucker](./v4-tucker/4.3.38)
+
+[4.3.37 Tucker](./v4-tucker/4.3.37)
+
+[4.3.36 Tucker](./v4-tucker/4.3.36)
+
+[4.3.35 Tucker](./v4-tucker/4.3.35)
+
+[4.3.34 Tucker](./v4-tucker/4.3.34)
+
+[4.3.33 Tucker](./v4-tucker/4.3.33)
+
+[4.3.32 Tucker](./v4-tucker/4.3.32)
+
+[4.3.31 Tucker](./v4-tucker/4.3.31)
+
+[4.3.30 Tucker](./v4-tucker/4.3.30)
+
+[4.3.29 Tucker](./v4-tucker/4.3.29)
+
+[4.3.28 Tucker](./v4-tucker/4.3.28)
+
+[4.3.27 Tucker](./v4-tucker/4.3.27)
+
+[4.3.26 Tucker](./v4-tucker/4.3.26)
+
+[4.3.25 Tucker](./v4-tucker/4.3.25)
+
+[4.3.24 Tucker](./v4-tucker/4.3.24)
+
+[4.3.23 Tucker](./v4-tucker/4.3.23)
+
+[4.3.22 Tucker](./v4-tucker/4.3.22)
+
+[4.3.21 Tucker](./v4-tucker/4.3.21)
+
+[4.3.20 Tucker](./v4-tucker/4.3.20)
+
+[4.3.19 Tucker](./v4-tucker/4.3.19)
+
+[4.3.18 Tucker](./v4-tucker/4.3.18)
+
+[4.3.17 Tucker](./v4-tucker/4.3.17)
+
+[4.3.16 Tucker](./v4-tucker/4.3.16)
+
+[4.3.15 Tucker](./v4-tucker/4.3.15)
+
+[4.3.14 Tucker](./v4-tucker/4.3.14)
+
+[4.3.13 Tucker](./v4-tucker/4.3.13)
+
+[4.3.12 Tucker](./v4-tucker/4.3.12)
+
+[4.3.11 Tucker](./v4-tucker/4.3.11)
+
+[4.3.10 Tucker](./v4-tucker/4.3.10)
+
+[4.3.9 Tucker](./v4-tucker/4.3.9)
+
+[4.3.8 Tucker](./v4-tucker/4.3.8)
+
+[4.3.7 Tucker](./v4-tucker/4.3.7)
+
+[4.3.6 Tucker](./v4-tucker/4.3.6)
+
+[4.3.5 Tucker](./v4-tucker/4.3.5)
+
+[4.3.4 Tucker](./v4-tucker/4.3.4)
+
+[4.3.3 Tucker](./v4-tucker/4.3.3)
+
+[4.3.2 Tucker](./v4-tucker/4.3.2)
+
+[4.3.1 Tucker](./v4-tucker/4.3.1)
+
+[4.3.0 Tucker](./v4-tucker/4.3.0)
+
+[4.2.8 Tucker](./v4-tucker/4.2.8)
+
+[4.2.7 Tucker](./v4-tucker/4.2.7)
+
+[4.2.6 Tucker](./v4-tucker/4.2.6)
+
+[4.2.5 Tucker](./v4-tucker/4.2.5)
+
+[4.2.4 Tucker](./v4-tucker/4.2.4)
+
+[4.2.3 Tucker](./v4-tucker/4.2.3)
+
+[4.2.2 Tucker](./v4-tucker/4.2.2)
+
+[4.2.1 Tucker](./v4-tucker/4.2.1)
+
+[4.2.0 Tucker](./v4-tucker/4.2.0)
+
+[4.1.2 Tucker](./v4-tucker/4.1.2)
+
+[4.1.1 Tucker](./v4-tucker/4.1.1)
+
+[4.1.0 Tucker](./v4-tucker/4.1.0)
+
+[4.0.7 Tucker](./v4-tucker/4.0.7)
+
+[4.0.6 Tucker](./v4-tucker/4.0.6)
+
+[4.0.5 Tucker](./v4-tucker/4.0.5)
+
+[4.0.4 Tucker](./v4-tucker/4.0.4)
+
+[4.0.3 Tucker](./v4-tucker/4.0.3)
+
+[4.0.2 Tucker](./v4-tucker/4.0.2)
+
+[4.0.1 Tucker](./v4-tucker/4.0.1)
+
+[4.0.0 Tucker](./v4-tucker/4.0.0)
+
+### Past Releases
+
+[Meet Monkey](./v3-monkey/) Our 3rd Release Pup
+
+[3.2.1 Monkey](./v3-monkey/3.2.1)
+
+[3.2.0 Monkey](./v3-monkey/3.2.0)
+
+[3.1.5 Monkey](./v3-monkey/3.1.5)
+
+[3.1.4 Monkey](./v3-monkey/3.1.4)
+
+[3.1.3 Monkey](./v3-monkey/3.1.3)
+
+[3.1.2 Monkey](./v3-monkey/3.1.2)
+
+[3.1.1 Monkey](./v3-monkey/3.1.1)
+
+[3.1.0 Monkey](./v3-monkey/3.1.0)
+
+[3.0.0 Monkey](./v3-monkey/3.0.0)
+
+***
+
+[Meet Penny](./v2-penny/) Our 2nd Release Pup
+
+[2.3.1 Penny](./v2-penny/2.3.1)
+
+[2.3.0 Penny](./v2-penny/2.3.0)
+
+[2.2.3 Penny](./v2-penny/2.2.3)
+
+[2.2.2 Penny](./v2-penny/2.2.2)
+
+[2.2.0 Penny](./v2-penny/2.2.0)
+
+[2.1.1 Penny](./v2-penny/2.1.1)
+
+***
+
+[Meet Alby](./v1-alby/) Our 1st Release Pup
+
+[1.3.1 Alby](./v1-alby/1.3.1)
+
+[1.3.0 Alby](./v1-alby/1.3.0)
+
+[1.2.0 Alby](./v1-alby/1.2.0)
+
+[1.1.0 Alby](./v1-alby/1.1.0) diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.1.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.1.0.md new file mode 100644 index
00000000..b42514a2 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.1.0.md @@ -0,0 +1,77 @@
+---
+title: 1.1.0
+sidebar_position: 89899
+---
+
+### HarperDB 1.1.0, Alby Release
+4/18/2018
+
+**Features**
+
+* Users & Roles:
+
+  * Limit/Assign access to all HarperDB operations
+
+  * Limit/Assign access to schemas, tables & attributes
+
+  * Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`)
+
+* Enhanced SQL parser
+
+  * Added extensive ANSI SQL support.
+
+  * Added Array function, which allows for converting relational data into Object/Hierarchical data
+
+  * `Distinct_Array` function: allows for removing duplicates in the Array function.
+
+  * Enhanced SQL validation: improved validation around the structure of SQL, validating the schema, etc.
+
+  * 10x performance improvement on SQL statements.
+
+* Export function: can now call a NoSQL/SQL search and have it export to CSV or JSON.
+
+* Added upgrade function to CLI
+
+* Added ability to perform bulk update from CSV
+
+* Created landing page for HarperDB.
+
+* Added CORS support to HarperDB
+
+**Fixes**
+
+* Fixed memory leak in CSV bulk loads
+
+* Corrected error when attempting to perform a `SQL DELETE`
+
+* Added further validation to NoSQL `UPDATE` to validate schema & table exist
+
+* Fixed install issue where, if part of the install path did not exist, the install would silently fail.
+
+* Fixed issues with replicated data when one of the replicas is down
+
+* Removed logging of initial user’s credentials during install
+
+* Can now use reserved words as aliases in SQL
+
+* Removed user(s) password in results when calling `list_users`
+
+* Corrected forwarding of operations to other nodes in a cluster
+
+* Corrected lag in schema meta-data passing to other nodes in a cluster
+
+* Drop table & drop schema now move the table or schema to the trash folder under the Database folder for later permanent deletion.
+
+* Bulk inserts no longer halt the entire operation if some records already exist; instead, the return includes the hashes of the records that were skipped.
+
+* Added ability to accept EULA from command line
+
+* Corrected `search_by_value` not searching on the correct attribute
+
+* Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js
+
+* Added error handling for errors resulting from SQL calculations.
+
+* Standardized error responses as JSON.
+
+* Corrected internal process generation to not allow more processes than the machine has cores. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.2.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.2.0.md new file mode 100644 index 00000000..095bf239 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.2.0.md @@ -0,0 +1,42 @@
+---
+title: 1.2.0
+sidebar_position: 89799
+---
+
+### HarperDB 1.2.0, Alby Release
+7/10/2018
+
+**Features**
+
+* Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time.
+* Geo: HarperDB has implemented turf.js into its SQL parser to enable geo-based analytics.
+* Jobs: CSV data loads, exports & Time to Live now all run as background jobs.
+* Exports: Perform queries that export into JSON or CSV and save to disk or S3.
+
+
+**Fixes**
+
+* Fixed issue where CSV data loads incorrectly reported the number of records loaded.
+* Added validation to stop `BETWEEN` operations in SQL.
+* Updated logging to not include internal variables in the logs.
+* Cleaned up `add_role` response to not include internal variables.
+* Removed old and unused dependencies.
+* Built out further unit tests and integration tests.
+* Fixed HTTPS to handle certificates properly.
+* Improved stability of clustering & replication.
+* Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response.
+* Fixed issue where Blob text was not being returned from `SQL SELECT`s.
+* Fixed error being returned when querying a table with no data; now correctly returns an empty array.
+* Improved performance in SQL when searching on exact values.
+* Fixed error when `./harperdb stop` is called.
+* Fixed logging issue causing instability in installer.
+* Fixed `read_log` operation to accept date time.
+* Added permissions checking to `export_to_s3`.
+* Added ability to run SQL on `SELECT` without a `FROM`.
+* Fixed issue where updating a user’s password was not encrypting properly.
+* Fixed `user_guide.html` to point to readme on git repo.
+* Created option to have HarperDB run as a foreground process.
+* Updated `user_info` to return the correct role for a user.
+* Fixed issue where HarperDB would not stop if the database root was deleted.
+* Corrected error message on insert if an invalid schema is provided.
+* Added permissions checks for user & role operations. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.3.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.3.0.md new file mode 100644 index 00000000..ad196159 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.3.0.md @@ -0,0 +1,27 @@
+---
+title: 1.3.0
+sidebar_position: 89699
+---
+
+### HarperDB 1.3.0, Alby Release
+11/2/2018
+
+**Features**
+
+* Upgrade: Upgrade to the newest version via command line.
+* SQL Support: Added `IS NULL` for SQL parser.
+* Added attribute validation to search operations.
+
+
+**Fixes**
+
+* Fixed `SELECT` calculations, e.g. `SELECT 2+2`.
+* Fixed `SELECT` with `OR` not returning expected results.
+* No longer allowing reserved words for schema and table names.
+* Corrected process interruptions from improper SQL statements.
+* Improved message handling between spawned processes that replace killed processes.
+* Enhanced error handling for updates to tables that do not exist.
+* Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes.
+* Fixed issue with new columns not being updated properly in update statements.
+* Now validating roles, tables and attributes when creating or updating roles.
+* Fixed an issue where in some cases `undefined` was being returned after dropping a role diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.3.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.3.1.md new file mode 100644 index 00000000..77e3ffe4 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/1.3.1.md @@ -0,0 +1,29 @@
+---
+title: 1.3.1
+sidebar_position: 89698
+---
+
+### HarperDB 1.3.1, Alby Release
+2/26/2019
+
+**Features**
+
+* Clustering connection direction appointment
+* Foundations for threading/multi processing
+* UUID autogen for hash attributes that were not provided
+* Added cluster status operation
+
+
+**Bug Fixes and Enhancements**
+
+* More logging
+* Clustering communication enhancements
+* Clustering queue ordering by timestamps
+* Cluster reconnection enhancements
+* Number of system core(s) detection
+* Node LTS (10.15) compatibility
+* Update/alter users enhancements
+* General performance enhancements
+* A warning is logged if different versions of HarperDB are connected via clustering
+* Fixed need to restart after user creation/alteration
+* Fixed SQL error that occurred on selecting from an empty table \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/_category_.json b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/_category_.json new file mode 100644 index 00000000..e33195ec --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/_category_.json @@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Alby (Version 1)",
+  "position": -1
+} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/index.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/index.md new file mode 100644 index 00000000..00d63978 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v1-alby/index.md @@ -0,0 +1,13 @@
+---
+title: HarperDB Alby (Version 1)
+---
+
+# HarperDB Alby (Version 1)
+
+Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup.
+
+Here is a bit about Alby:
+
+![picture of black dog](/img/v4.5/dogs/alby.webp)
+
+_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sunbathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_ diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.1.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.1.1.md new file mode 100644 index 00000000..e1314a5f --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.1.1.md @@ -0,0 +1,27 @@
+---
+title: 2.1.1
+sidebar_position: 79898
+---
+
+### HarperDB 2.1.1, Penny Release
+05/22/2020
+
+**Highlights**
+
+* CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values.
+* CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to alias in `ORDER BY` statement.
+* CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values.
+* CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas.
+* CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function.
+* CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except forward slashes and backticks. The same rules now apply for hash attribute values.
+* CORE-1003 Fixed handling of `ORDER BY` statements with function aliases.
+* CORE-1004 Fixed bug related to `SELECT *` on `JOIN` queries with table columns with the same name.
+* CORE-996 Fixed an issue where the `transact_to_cluster` flag is lost for CSV URL loads; fixed an issue where new attributes created in CSV bulk load did not sync to the cluster.
+* CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, CPU, disk, network.
+* CORE-993 Added new custom date functions for AlaSQL & UTC updates.
+* CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process.
+* CORE-992 HTTPS enabled by default.
+* CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage.
+* CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime.
+* CORE-975 Updated time values set by SQL Date Functions to be in epoch format.
+* CORE-974 Added date functions to `SQL SELECT` column alias functionality. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.0.md new file mode 100644 index 00000000..267168cd --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.0.md @@ -0,0 +1,43 @@
+---
+title: 2.2.0
+sidebar_position: 79799
+---
+
+### HarperDB 2.2.0, Penny Release
+08/24/2020
+
+**Features/Updates**
+
+* CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up less resources
+* CORE-1018 Adds SQL functionality for `BETWEEN` statements
+* CORE-1032 Updates permissions to allow regular users (i.e.
non-super users) to call the `get_job` operation
+* CORE-1036 On create/drop table we auto create/drop the related transaction environments for the schema.table
+* CORE-1042 Built raw functions to write to a table's transaction log for insert/update/delete operations
+* CORE-1057 Implemented write transaction into LMDB create/update/delete functions
+* CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards
+* CORE-1059 Added config setting to disable transaction logging for an instance
+* CORE-1076 Adds permissions filter to describe operations
+* CORE-1043 Change clustering catchup to use the new transaction log
+* CORE-1052 Removed word "master" from source
+* CORE-1061 Added new operation called `delete_transactions_before`; this will tail a transaction log for a specific schema/table
+* CORE-1040 On HarperDB startup make sure all tables have a transaction environment
+* CORE-1055 Added 2 new settings to change the server headersTimeout & keepAliveTimeout from the config file
+* CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value`
+* CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count
+* CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL.
+* CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions`
+* CORE-1092 Do not allow the `__createdtime__` to be updated
+* CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate
+* CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found
+* CORE-1049 Updates response message for SQL update op with no matching rows
+* CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user.
+
+**Bug Fixes**
+
+* CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query
+* CORE-1037 Fixed an issue where numbers with a leading zero, i.e. 00123, were converted to numbers rather than being honored as strings.
+* CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo
+* CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped.
+* CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records.
+* CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.2.md new file mode 100644 index 00000000..827c63db --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.2.md @@ -0,0 +1,16 @@
+---
+title: 2.2.2
+sidebar_position: 79797
+---
+
+### HarperDB 2.2.2, Penny Release
+10/27/2020
+
+* CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled.
+* CORE-1153 Fixed issue where `delete_files_before` was writing to transaction log.
+* CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created.
+* CORE-1112 Adds handling for system timestamp attributes in permissions.
+* CORE-1131 Adds better handling for checking perms on operations with action value in JSON.
+* CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions.
+* CORE-1135 Adds validation for valid keys in role API operations.
+* CORE-1073 Adds new `import_from_s3` operation to API. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.3.md new file mode 100644 index 00000000..eca953e2 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.2.3.md @@ -0,0 +1,9 @@
+---
+title: 2.2.3
+sidebar_position: 79796
+---
+
+### HarperDB 2.2.3, Penny Release
+11/16/2020
+
+* CORE-1158 Performance improvements to the core delete function and configuration of `delete_files_before` to run in batches with a pause in between. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.3.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.3.0.md new file mode 100644 index 00000000..2b248490 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.3.0.md @@ -0,0 +1,22 @@
+---
+title: 2.3.0
+sidebar_position: 79699
+---
+
+### HarperDB 2.3.0, Penny Release
+12/03/2020
+
+**Features/Updates**
+
+* CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (see documentation for more information)
+* CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation
+* CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings.
+* CORE-1175 Added new internal LMDB function to copy an environment for use in future features.
+* CORE-1166 Updated packages to address security vulnerabilities.
+
+**Bug Fixes**
+
+* CORE-1195 Modified `drop_attribute` to drop after data cleanse completes.
+* CORE-1149 Fix SQL bug regarding self joins and updates alasql to 0.6.5 release.
+* CORE-1168 Fix inconsistent invalid schema/table errors.
+* CORE-1162 Fix bug which caused `delete_files_before` to cause tables to grow in size due to an open cursor issue.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.3.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.3.1.md new file mode 100644 index 00000000..51291a01 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/2.3.1.md @@ -0,0 +1,12 @@
+---
+title: 2.3.1
+sidebar_position: 79698
+---
+
+### HarperDB 2.3.1, Penny Release
+1/29/2021
+
+**Bug Fixes**
+
+* CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`.
+* CORE-1219 Increased maximum table attributes from 1000 to 10000 \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/_category_.json b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/_category_.json new file mode 100644 index 00000000..285eecf7 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/_category_.json @@ -0,0 +1,4 @@
+{
+  "label": "HarperDB Penny (Version 2)",
+  "position": -2
+} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/index.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/index.md new file mode 100644 index 00000000..37b3ffde --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v2-penny/index.md @@ -0,0 +1,13 @@
+---
+title: HarperDB Penny (Version 2)
+---
+
+# HarperDB Penny (Version 2)
+
+Did you know our release names are dedicated to employee pups? For our second release, Penny was the star.
+
+Here is a bit about Penny:
+
+![picture of brindle dog](/img/v4.5/dogs/penny.webp)
+
+_Hi, I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._ diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.0.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.0.0.md new file mode 100644 index 00000000..2907ee6c --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.0.0.md @@ -0,0 +1,31 @@
+---
+title: 3.0.0
+sidebar_position: 69999
+---
+
+### HarperDB 3.0, Monkey Release
+5/18/2021
+
+**Features/Updates**
+
+* CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation.
+* CORE-1304 Upgrade to Node 12.22.1.
+* CORE-1235 Adds new upgrade/install functionality.
+* CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance.
+* CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`.
+* CORE-1243 Change `HTTPS_ON` setting value to false by default.
+* CORE-1189 Implement Fastify web server, resulting in improved performance.
+* CORE-1221 Update user API to use role name instead of role id.
+* CORE-1225 Updated dependencies to eliminate npm security warnings.
+* CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality.
+
+**Bug Fixes**
+
+* CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB.
+* CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log.
+* CORE-1285 Update warning log when trying to create an attribute that already exists.
+* CORE-1254 Added logic to manage data collisions in clustering.
+* CORE-1212 Add pre-check to `drop_user` that returns an error if the user doesn't exist.
+* CORE-1114 Update response code and message from `add_user` when user already exists.
+* CORE-1111 Update response from `create_attribute` to match the create schema/table response.
+* CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.0.md new file mode 100644 index 00000000..148690f6 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.0.md @@ -0,0 +1,23 @@
+---
+title: 3.1.0
+sidebar_position: 69899
+---
+
+### HarperDB 3.1.0, Monkey Release
+8/24/2021
+
+**Features/Updates**
+
+* CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions
+* CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions
+* CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management
+* CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments
+* CORE-1310 Updated licensing functionality
+* CORE-1301 Updated validation for performance improvement
+* CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds
+* CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES`
+
+**Bug Fixes**
+
+* CORE-1315 Corrected issue in HarperDB restart scenario
+* CORE-1370 Update some of the validation error handlers so that they don't log the full stack \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.1.md new file mode 100644 index 00000000..0adbeb21 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.1.md @@ -0,0 +1,18 @@
+---
+title: 3.1.1
+sidebar_position: 69898
+---
+
+### HarperDB 3.1.1, Monkey Release
+9/23/2021
+
+**Features/Updates**
+
+* CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart
+* CORE-1395 Create a
setting which allows the local Studio to be served from an instance of HarperDB +* CORE-1397 Update the stock 404 response to not return the request URL +* General updates to optimize the Docker container + +**Bug Fixes** + +* CORE-1399 Added fixes for complex SQL alias issues \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.2.md new file mode 100644 index 00000000..f1c192b6 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.2.md @@ -0,0 +1,15 @@ +--- +title: 3.1.2 +sidebar_position: 69897 +--- + +### HarperDB 3.1.2, Monkey Release +10/21/2021 + +**Features/Updates** + +* Updated the installation ASCII art to reflect the new HarperDB logo + +**Bug Fixes** + +* CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number, causing tables to behave unexpectedly \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.3.md new file mode 100644 index 00000000..2d484f8d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.3.md @@ -0,0 +1,11 @@ +--- +title: 3.1.3 +sidebar_position: 69896 +--- + +### HarperDB 3.1.3, Monkey Release +1/14/2022 + +**Bug Fixes** + +* CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.4.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.4.md new file mode 100644 index 00000000..ae0074fd --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.4.md @@ -0,0 +1,11 @@ +--- +title: 3.1.4 +sidebar_position: 69895 +--- + +### HarperDB 3.1.4, Monkey Release +2/24/2022 + +**Features/Updates** + +* CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.5.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.5.md new file mode 100644 index 00000000..eff4b5b0 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.1.5.md @@ -0,0 +1,11 @@ +--- +title: 3.1.5 +sidebar_position: 69894 +--- + +### HarperDB 3.1.5, Monkey Release +3/4/2022 + +**Features/Updates** + +* CORE-1498 Fixed incorrect autocasting of strings that start with "0.", which tried to convert them to numbers but instead returned NaN. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.2.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.2.0.md new file mode 100644 index 00000000..003575d8 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.2.0.md @@ -0,0 +1,13 @@ +--- +title: 3.2.0 +sidebar_position: 69799 +--- + +### HarperDB 3.2.0, Monkey Release +3/25/2022 + +**Features/Updates** + +* CORE-1391 Bug fix related to orphaned HarperDB background processes.
+* CORE-1509 Updated node version check, updated Node.js version, updated project dependencies. +* CORE-1518 Remove final call from logger. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.2.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.2.1.md new file mode 100644 index 00000000..dc511a70 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.2.1.md @@ -0,0 +1,11 @@ +--- +title: 3.2.1 +sidebar_position: 69798 +--- + +### HarperDB 3.2.1, Monkey Release +6/1/2022 + +**Features/Updates** + +* CORE-1573 Added logic to track the pid of the foreground process when running in the foreground; on stop, that pid is used to kill the process. Logic was also added to kill the pm2 daemon when stop is called. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.3.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.3.0.md new file mode 100644 index 00000000..3e3ca784 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/3.3.0.md @@ -0,0 +1,12 @@ +--- +title: 3.3.0 +sidebar_position: 69699 +--- + +### HarperDB 3.3.0 - Monkey + +* CORE-1595 Added new role type `structure_user`, which enables non-superusers to create/drop schemas, tables, and attributes. +* CORE-1501 Improved performance for `drop_table`. +* CORE-1599 Added two new operations for custom functions: `install_node_modules` and `audit_node_modules`. +* CORE-1598 Added `skip_node_modules` flag to the `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`. +* CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, and Python are no longer required when installing on these platforms. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/_category_.json b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/_category_.json new file mode 100644 index 00000000..0103ac36 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Monkey (Version 3)", + "position": -3 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/index.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/index.md new file mode 100644 index 00000000..4c61ed0e --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v3-monkey/index.md @@ -0,0 +1,11 @@ +--- +title: HarperDB Monkey (Version 3) +--- + +# HarperDB Monkey (Version 3) + +Did you know our release names are dedicated to employee pups? For our third release, we have Monkey. + +![picture of tan dog](/img/v4.5/dogs/monkey.webp) + +_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle Dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to my home.
I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_ diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.0.md new file mode 100644 index 00000000..49770307 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.0.md @@ -0,0 +1,124 @@ +--- +title: 4.0.0 +sidebar_position: 59999 +--- + +### HarperDB 4.0.0, Tucker Release +11/2/2022 + +**Networking & Data Replication (Clustering)** + +The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https://nats.io/), an enterprise-grade connective technology responsible for the addressing, discovery, and exchange of messages that drive the common patterns in distributed systems. +* CORE-1464, CORE-1470: Remove SocketCluster dependencies and all code related to them. +* CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE-1665, CORE-1710, CORE-1801, CORE-1865: Add `nats-server` code as a dependency; on install of HarperDB, download `nats-server` if possible, else fall back to building it from source code. +* CORE-1593, CORE-1761: Add `nats.js` as a project dependency. +* CORE-1466: Build NATS configs on `harperdb run` based on the HarperDB YAML configuration. +* CORE-1467, CORE-1508: Launch and manage NATS servers with PM2. +* CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions. +* CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create the work queue stream, update the `hdb_nodes` table, create the clustering folder structure, and rebuild self-signed certs. +* CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS. +* CORE-1504: Update existing transaction-saving hooks to work with NATS. +* CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in the payload. These operations now dynamically manage sourcing of table-level transaction streams between nodes and work queues. +* CORE-1522: Create the `NATSReplyService` process, which handles receiving NATS-based requests from remote instances and sending back appropriate responses. +* CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation. +* CORE-1611: Update pre-existing transaction log operations to be audit log operations. +* CORE-1541, CORE-1612, CORE-1613: Create transaction log operations which interface with streams. +* CORE-1668: Update NATS serialization/deserialization to use MessagePack. +* CORE-1673: Add a `system_info` param to the `hdb_nodes` table and update it on `add_node` and `cluster_status`. +* CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart and a clustering-only restart call the NATS server with a reload directive to maintain full uptime while servers refresh. +* CORE-1474: HarperDB install adds the clustering folder structure. +* CORE-1530: After `drop_table`, HarperDB purges the related transaction stream. +* CORE-1567: Set NATS config to always use TLS. +* CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate.
+* CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`, which enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB. +* CORE-1535: Create the work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB, which are then ingested in order. +* CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`. +* CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exactly-once delivery. +* CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows but NATS may not. +* CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets the time from which to start synchronizing transactions. +* CORE-1785: Optimizations and bug fixes with regard to sourcing data from remote instances of HarperDB. +* CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together. +* CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh. +* CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh. +* CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to the new hub server port. +* CORE-1609: Allow `remove_node` to be called when the other node cannot be reached. +* CORE-1815: Add a transaction lock to `add_node` and `update_node` to avoid a concurrent NATS source update bug. +* CORE-1848: Update stream configs if the node name has been changed in the YAML configuration. +* CORE-1873: Update `add_node` and `update_node` so that they auto-create the schema/table on both the local and remote nodes, respectively. + + +**Data Storage** + +We have made improvements to how we store, index, and retrieve data. +* CORE-1619: Enabled new concurrent flushing technology for improved write performance. +* CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions. +* CORE-1652: Encode the values of secondary indices more efficiently for faster access. +* CORE-1670: Store the updated timestamp in the `lmdb.js` version property. +* CORE-1651: Enabled multi-value indexing of array values, which allows searching on specific elements in an array more efficiently. +* CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in a separate blob index. Now they are segmented and delimited in the same index to increase search performance. +* Complex objects and object arrays are no longer stored in a separate index, to save storage and increase write throughput. +* CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values. +* CORE-1657: Deferred property decoding allows large objects to be stored while individual attributes can be accessed (as with `get_attributes`) without incurring the cost of decoding the entire object. +* CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data. +* CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates. +* CORE-1653: Upgrade to 4.0 rebuilds tables to reflect the index improvements.
+* CORE-1753: Removed the old `node-lmdb` dependency. +* CORE-1787: Freeze objects returned from queries. +* CORE-1821: Read the `WRITE_ASYNC` setting, which enables LMDB nosync. + +**Logging** + +HarperDB has increased logging specificity by breaking out logs based on the component doing the logging. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more. +* CORE-1497: Remove `pino` and `winston` dependencies. +* CORE-1426: All logging is output via `stdout` and `stderr`; by default this output is then picked up by PM2, which handles writing it out to file. +* CORE-1431: Improved `read_log` operation validation. +* CORE-1433, CORE-1463: Added log rotation. +* CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration. +* CORE-1436: Upgrade to 4.0 updates internals for logging changes. +* CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements, and improved unit tests. +* CORE-1691: Convert non-PM2-managed log file writes to use the Node.js `fs.appendFileSync` function. + +**Configuration** + +HarperDB has updated its configuration from a properties file to YAML. +* CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML. +* CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file. +* The installer has updated prompts to reflect YAML settings. +* CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`. +* CORE-1461, CORE-1462, CORE-1483: Unit test improvements. +* CORE-1492: Improvements to the `get_configuration` and `set_configuration` operations. +* CORE-1503: Modify HarperDB configuration for more granular certificate definition. +* CORE-1591: Rename the `routes` `ip` param to `host` and move `routes` to the `leaf` config in `harperdb.conf`. +* CORE-1519: Fix issue where switching between old and new versions of HarperDB produced a "config parameter is undefined" error on npm install. + +**Broad Node.js and Platform Support** +* CORE-1624: HarperDB can now run on multiple versions of Node.js, from v14 to v19. We primarily test on v18, so that is the preferred version. + +**Windows 10 and 11** +* CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or be installed in WSL. Windows is only intended for evaluation and development purposes, not for production workloads. + +**Extra Changes and Bug Fixes** +* CORE-1520: Refactor installer to remove all waterfall code and update it to use Promises. +* CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb. +* CORE-1586: When HarperDB is running in the foreground, stop any additional logging processes from being spawned. +* CORE-1626: Update the Docker file to accommodate the new `harperdb.conf` file. +* CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869: Changes for the CI/CD pipeline and integration tests. +* CORE-1661: Fixed issue where an old boot properties file caused an error when attempting to install 4.0.0.
+* CORE-1697, CORE-1814, CORE-1855: Upgrade the fastify dependency to the new major version 4. +* CORE-1629: Jobs now run as processes managed by the PM2 daemon. +* CORE-1733: Update LICENSE to reflect our EULA on our site. +* CORE-1606: Enable Custom Functions by default. +* CORE-1714: Include pre-built binaries for the most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64). +* CORE-1628: Fix issue where setting the license through an environment variable was not working. +* CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements. +* CORE-1706: Add support for encoding HTTP responses with MessagePack. +* CORE-1709: Improve the way lmdb.js dependencies are installed. +* CORE-1758: Remove/update unnecessary HTTP headers. +* CORE-1756: On `npm install` and `harperdb install`, change the node version check from an error to a warning if the installed Node.js version does not match our preferred version. +* CORE-1791: Optimizations to authenticated user caching. +* CORE-1794: Update README to discuss Windows support and Node.js versions. +* CORE-1837: Fix issue where the Custom Functions directory was not being created on install. +* CORE-1742: Add more validation to the audit log: check that the schema/table exists and that the log is enabled. +* CORE-1768: Fix issue where, when running in the foreground, the HarperDB process was not stopping on `harperdb stop`. +* CORE-1864: Fix to semver checks on upgrade. +* CORE-1850: Fix issue where a `cluster_user` type role could not be altered. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.1.md new file mode 100644 index 00000000..9e148e63 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.1.md @@ -0,0 +1,12 @@ +--- +title: 4.0.1 +sidebar_position: 59998 +--- + +### HarperDB 4.0.1, Tucker Release +01/20/2023 + +**Bug Fixes** + +* CORE-1992 The local studio was not loading because the path got mangled in the build. +* CORE-2001 Fixed `deploy_custom_function_project`, which broke after the Node.js update. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.2.md new file mode 100644 index 00000000..b65d1427 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.2.md @@ -0,0 +1,12 @@ +--- +title: 4.0.2 +sidebar_position: 59997 +--- + +### HarperDB 4.0.2, Tucker Release +01/24/2023 + +**Bug Fixes** + +* CORE-2003 Fix bug where, if the machine had one core, the thread config would default to zero. +* Update to lmdb 2.7.3 and msgpackr 1.7.0. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.3.md new file mode 100644 index 00000000..67aaae56 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.3.md @@ -0,0 +1,11 @@ +--- +title: 4.0.3 +sidebar_position: 59996 +--- + +### HarperDB 4.0.3, Tucker Release +01/26/2023 + +**Bug Fixes** + +* CORE-2007 Add the "update nodes 4.0.0" launch script to the build script to fix the clustering upgrade.
diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.4.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.4.md new file mode 100644 index 00000000..2a30c9d1 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.4.md @@ -0,0 +1,11 @@ +--- +title: 4.0.4 +sidebar_position: 59995 +--- + +### HarperDB 4.0.4, Tucker Release +01/27/2023 + +**Bug Fixes** + +* CORE-2009 Fixed bug where `add_node` was not being called when upgrading clustering. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.5.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.5.md new file mode 100644 index 00000000..dc66721f --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.5.md @@ -0,0 +1,14 @@ +--- +title: 4.0.5 +sidebar_position: 59994 +--- + +### HarperDB 4.0.5, Tucker Release +02/15/2023 + +**Bug Fixes** + +* CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to the upgrade process to determine whether new certificates should be created or existing certificates should be kept/used. +* Fix the way NATS connections are honored in a local environment. +* Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. + diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.6.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.6.md new file mode 100644 index 00000000..bf97d148 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.6.md @@ -0,0 +1,11 @@ +--- +title: 4.0.6 +sidebar_position: 59993 +--- + +### HarperDB 4.0.6, Tucker Release +03/09/2023 + +**Bug Fixes** + +* Fixed a data serialization error that occurred when a large number of different record structures were persisted in a single table. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.7.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.7.md new file mode 100644 index 00000000..7d48666a --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.0.7.md @@ -0,0 +1,11 @@ +--- +title: 4.0.7 +sidebar_position: 59992 +--- + +### HarperDB 4.0.7, Tucker Release +03/10/2023 + +**Bug Fixes** + +* Update the lmdb.js dependency. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.0.md new file mode 100644 index 00000000..eaa825a8 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.0.md @@ -0,0 +1,63 @@ +--- +title: 4.1.0 +sidebar_position: 59899 +--- + +# 4.1.0 + +HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation with support for optimized load tracking and session affinity, better debuggability, and reduced memory footprint. + +This means debugging will be much easier for custom functions.
If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, set breakpoints in your custom functions, and debug them. + +The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread, which can improve caching locality, performance, and fairness. This can be enabled with the [`http.sessionAffinity` option in your configuration](../../../deployments/configuration#http); a configuration sketch appears at the end of these notes. + +HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory-efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent), and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and provide a means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade: all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or for-of loops to handle query results). + +4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../deployments/configuration#storage) and [schemas configuration](../../../deployments/configuration#schemas) for information on how to configure these locations. + +Logging has been revamped and condensed into one `hdb.log` file. See the logging documentation for more information. + +A new operation called `cluster_network` was added; this operation will ping the cluster and return a list of enmeshed nodes. + +Custom Functions will no longer automatically load static file routes; instead, the `@fastify/static` plugin will need to be registered with the Custom Functions server. See [Host A Static Web UI](https://docs.harperdb.io/docs/v/4.1/custom-functions/host-static). + +Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object, it should be done in the `key` parameter. See examples [here](../../../developers/operations-api/bulk-operations#import-from-s3). + +Due to the AWS SDK v2 reaching end-of-life support, we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3`: + +* A new attribute `region` will need to be supplied. +* The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`. + +Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard Unix tooling for interacting with the process, which is conducive to running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in a separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`.
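+ +As a closing sketch of the options above (non-authoritative; the `sessionAffinity` value and the storage path shown are illustrative assumptions, so check the [configuration](../../../deployments/configuration) reference for the exact parameters and accepted values), a `harperdb-config.yaml` fragment might look like: + +```yaml +http: + sessionAffinity: ip # consistently route each client to the same worker thread +storage: + path: /data/hdb-storage # hypothetical volume for database storage files +```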
+ +Internal Tickets completed: + +* CORE-609 - Ensure that attribute names are always added to global schema as Strings +* CORE-1549 - Remove fastify-static code from Custom Functions server which auto serves content from "static" folder +* CORE-1655 - Iterator based queries +* CORE-1764 - Fix issue where describe\_all operation returns an empty object for non super-users if schema(s) do not yet have table(s) +* CORE-1854 - Switch to using worker threads instead of processes for handling concurrency +* CORE-1877 - Extend the csv\_url\_load operation to allow for additional headers to be passed to the remote server when the csv is being downloaded +* CORE-1893 - Add last updated timestamp to describe operations +* CORE-1896 - Fix issue where Select \* from system.hdb\_info returns wrong HDB version number after Instance Upgrade +* CORE-1904 - Fix issue when executing GEOJSON query in SQL +* CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams +* CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. +* CORE-1655 - Streaming binary format serialization +* CORE-1943 - Add configuration option to set mount point for audit tables +* CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. +* CORE-1963 - Update logging for better readability, reduced duplication, and request context information. +* CORE-1968 - In server\nats\natsIngestService.js remove the js\_msg.working(); line to improve performance. +* CORE-1976 - Fix error when calling describe\_table operation with no schema or table defined in payload. +* CORE-1983 - Fix issue where create\_attribute operation does not validate request for required attributes +* CORE-2015 - Remove PM2 logs that get logged in console when starting HDB +* CORE-2048 - systemd script for 4.1 +* CORE-2052 - Include thread information in system\_information for visibility of threads +* CORE-2061 - Add a better error msg when clustering is enabled without a cluster user set +* CORE-2068 - Create new log rotate logic since pm2 log-rotate no longer used +* CORE-2072 - Update to Node 18.15.0 +* CORE-2090 - Upgrade Testing from v4.0.x and v3.x to v4.1. +* CORE-2091 - Run the performance tests +* CORE-2092 - Allow for automatic patch version updates of certain packages +* CORE-2109 - Add verify option to clustering TLS configuration +* CORE-2111 - Update AWS SDK to v3 diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.1.md new file mode 100644 index 00000000..537ef71c --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.1.md @@ -0,0 +1,15 @@ +--- +title: 4.1.1 +sidebar_position: 59898 +--- + +# 4.1.1 + +06/16/2023 + +* HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration). 
+* An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration), which can provide improved replication performance in a fully connected network. +* Improvements to our OpenShift container. +* Dependency security updates. + +**Bug Fixes** + +* Fixed a bug in reporting database metrics in the `system_information` operation. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.2.md new file mode 100644 index 00000000..2a62db64 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.1.2.md @@ -0,0 +1,13 @@ +--- +title: 4.1.2 +sidebar_position: 59897 +--- + +### HarperDB 4.1.2, Tucker Release +06/16/2023 + +* HarperDB has updated binary dependencies to support older glibc versions back to 2.17. +* A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`. +* Improvements to our OpenShift container. +* Dependency security updates. + diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.0.md new file mode 100644 index 00000000..55bfe220 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.0.md @@ -0,0 +1,99 @@ +--- +title: 4.2.0 +sidebar_position: 59799 +--- + +# 4.2.0 + +#### HarperDB 4.2.0 + +HarperDB 4.2 introduces a new interface for accessing our core database engine with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 has also adopted a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities, including a new REST interface, advanced caching, and real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events. + +4.2 also introduces configurable database schemas, using GraphQL Schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting started guide](../../../getting-started) to see how easy it is to get started with HarperDB apps. + +### Resource API + +The [Resource API](../../reference/resource) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB databases/tables and is designed to easily be implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real-time. The [Resource API documentation details this interface](../../reference/resource). + +### Component Architecture + +HarperDB's custom functions have evolved towards a [full component architecture](../../../developers/components); our internal functionality is defined as components, and this can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a well-defined interface for creating your own components.
Components can easily be deployed/installed into HarperDB using NPM and GitHub references as well. + +### Configurable Database Schemas + +HarperDB applications or components support [schema definitions using GraphQL schema syntax](../../../developers/applications/defining-schemas). This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with application code. + +### REST Interface + +HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs, and more. With standards-based header support built in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers. + +### Real-Time + +HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real-time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database-style data updates and querying along with message delivery. [Real-time messaging](../../../developers/real-time) of data is available through several protocols: + +#### MQTT + +4.2 now includes support for MQTT, a publish-and-subscribe messaging protocol designed for efficiency (efficient enough for even small Internet of Things devices). This allows clients to connect to HarperDB and publish messages through our data center and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions. + +#### WebSockets + +HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling. + +#### Server-Sent Events + +HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows websites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP. + +### Database Structure + +HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are maintained in the same database with atomic consistency as well. + +Databases are now entirely encapsulated in a file, which means they can be moved/copied to another server without requiring any separate metadata updates in the system tables. + +### Clone Node + +HarperDB includes new functionality for adding new HarperDB nodes to a cluster. New instances can be configured to clone from a leader node, performing and copying a database snapshot from the leader node, and self-configuring from it as well, to facilitate accelerated deployment of new nodes for fast horizontal scaling to meet demand needs.
[See the documentation on Clone Node for more information.](../../../administration/cloning) + +### Operations API terminology updates + +Any operation that used the `schema` property was updated to make this property optional and alternatively support `database` as the property for specifying the database (formerly 'schema'). If both `schema` and `database` are absent, the operation defaults to using the `data` database. The term 'primary key' is now used in place of 'hash', and the NoSQL operation `search_by_hash` was updated to `search_by_id`. + +Support was added for defining a table with `primary_key` instead of `hash_attribute`. + +## Configuration + +There have been significant changes to `harperdb-config.yaml`; however, none of these changes should affect pre-4.2 versions. If you upgrade to 4.2, any existing configuration should be backwards compatible and will not need to be updated. + +`harperdb-config.yaml` has had configuration values added, removed, and renamed, and some defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration) for the most current configuration parameters; a consolidated sketch of the new layout appears below, before the Dev Mode section. + +* The `http` element has been expanded. + * `compressionThreshold` was added. + * All `customFunction` configuration now lives here, except for the `tls` section. +* `threads` has moved out of the `http` element and is now its own top-level element. +* The `authentication` section was moved out of the `operationsApi` section and is now its own top-level element/section. +* `analytics.aggregatePeriod` was added. +* The default logging level was changed to `warn`. +* The default clustering log level was changed to `info`. +* `clustering.republishMessages` now defaults to `false`. +* `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`. +* Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section. +* Added a `securePort` parameter to `operationsApi` and `http` used for setting the HTTPS port. +* Added a new top-level `tls` section. +* Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https`, and `operationsApi.nodeEnv`. +* Added an element called `componentRoot` which replaces `customFunctions.root`. +* Updated custom pathing to use `databases` instead of `schemas`. +* Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events. +* A new `mqtt` section was added. + +### Socket Management + +HarperDB now uses socket sharing (`SO_REUSEPORT`) to distribute incoming connections to different threads. This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity based socket delegation. + +HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single-port configuration. + +### Sessions + +HarperDB now supports cookie-based sessions for authenticating web clients. This can be used with the standard authentication mechanisms to log in, and then cookies can be used to preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials.
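+ +Pulling several of the configuration changes described in the Configuration section above into one place, a minimal `harperdb-config.yaml` fragment might look like the following. This is a non-authoritative sketch: only keys named in these notes are shown, and the values are illustrative assumptions rather than documented defaults. + +```yaml +componentRoot: ~/hdb/components # replaces customFunctions.root; path is illustrative +threads: 4 # now a top-level element (moved out of http) +authentication: {} # now a top-level section (moved out of operationsApi) +clustering: + republishMessages: false # new default +logging: + level: warn # new default level + auditAuthEvents: + logFailed: true + logSuccessful: false +http: + compressionThreshold: 1200 # new parameter; byte threshold shown is assumed + securePort: 9443 # new parameter for the HTTPS port; value illustrative +mqtt: {} # new section +tls: {} # new top-level section +```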
+ +### Dev Mode + +HarperDB can now directly run an application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting with any changes in your application files. Dev mode is recommended for local application and component development. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.1.md new file mode 100644 index 00000000..38617ca9 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.1.md @@ -0,0 +1,13 @@ +--- +title: 4.2.1 +sidebar_position: 59798 +--- + +### HarperDB 4.2.1, Tucker Release +11/3/2023 + +* Downgrade NATS 2.10.3 back to 2.10.1 due to a regression in connection handling. +* Handle package names with underscores. +* Improved validation of queries and comparators. +* Avoid double replication on transactions with multiple commits. +* Added file metadata on `get_component_file`. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.2.md new file mode 100644 index 00000000..15768374 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.2.md @@ -0,0 +1,15 @@ +--- +title: 4.2.2 +sidebar_position: 59797 +--- + +### HarperDB 4.2.2, Tucker Release +11/8/2023 + +* Increase timeouts for NATS connections. +* Fix for database snapshots for backups (and for clone node). +* Fix application of permissions for default tables exposed through REST. +* Log replication failures with record information. +* Fix application of authorization/permissions for MQTT commands. +* Fix copying of local components in clone node. +* Fix calculation of overlapping start time in clone node.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.3.md new file mode 100644 index 00000000..dab25c3d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.3.md @@ -0,0 +1,13 @@ +--- +title: 4.2.3 +sidebar_position: 59796 +--- + +### HarperDB 4.2.3, Tucker Release +11/15/2023 + +* When setting `securePort`, disable the unsecure port setting on the same port +* Fix `harperdb status` when the pid file is missing +* Fix/include missing icons/fonts from the local studio +* Fix crash that can occur when concurrently accessing records > 16KB +* Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.4.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.4.md new file mode 100644 index 00000000..87ee241d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.4.md @@ -0,0 +1,10 @@ +--- +title: 4.2.4 +sidebar_position: 59795 +--- + +### HarperDB 4.2.4, Tucker Release +11/16/2023 + +* Prevent coercion of strings to numbers in SQL queries (in the WHERE clause) +* Address fastify deprecation warning about accessing config \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.5.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.5.md new file mode 100644 index 00000000..1172c4b3 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.5.md @@ -0,0 +1,12 @@ +--- +title: 4.2.5 +sidebar_position: 59794 +--- + +### HarperDB 4.2.5, Tucker Release +11/22/2023 + +* Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery) +* Update the `geoNear` function to tolerate null values +* lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved +* Add header to indicate the start of a new authenticated session (for the studio to identify authenticated sessions) diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.6.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.6.md new file mode 100644 index 00000000..d0a1f177 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.6.md @@ -0,0 +1,10 @@ +--- +title: 4.2.6 +sidebar_position: 59793 +--- + +### HarperDB 4.2.6, Tucker Release +11/29/2023 + +* Update various geo SQL functions to tolerate invalid values +* Properly report component installation/load errors in `get_components` (for the studio to load components after an installation failure) \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.7.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.7.md new file mode 100644 index 00000000..78bfcaa7 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.7.md @@ -0,0 +1,11 @@ +--- +title: 4.2.7 +sidebar_position: 59792 +--- + +### HarperDB 4.2.7 +12/6/2023 + +* Add support for cloning over the top of an existing HarperDB instance +* Add health checks for the NATS consumer with the ability to restart consumer loops for better resiliency +* Revert the
Fastify autoload module due to a regression that had caused ECMAScript modules for Fastify routes to fail to load on Windows \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.8.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.8.md new file mode 100644 index 00000000..fbe94b69 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.2.8.md @@ -0,0 +1,14 @@ +--- +title: 4.2.8 +sidebar_position: 59791 +--- + +### HarperDB 4.2.8 +12/19/2023 + +* Added support for CLI command-line arguments for clone node +* Added support for cloning a node without enabling clustering +* Clear the NATS client cache on the closed event +* Fix the check for attribute permissions so that an empty attribute permissions array is treated as a table-level permission definition +* Improve speed of cross-node health checks +* Fix for using `database` in describe operations diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.0.md new file mode 100644 index 00000000..f6aa2046 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.0.md @@ -0,0 +1,125 @@ +--- +title: 4.3.0 +sidebar_position: 59699 +--- + +# 4.3.0 + +#### HarperDB 4.3.0, Tucker Release + +3/19/2024 + +#### Relationships and Joins + +HarperDB now supports defining relationships between tables. These relationships can be defined as one-to-many, many-to-one, or many-to-many, and use a foreign key to record the relationship between records from different tables. An example of how to use this to define many-to-one and one-to-many relationships between a product and a brand table: + +```graphql +type Product @table { + id: ID @primaryKey + name: String @indexed + # foreign key used to reference a brand + brandId: ID @indexed + # many-to-one relationship to brand + brand: Brand @relation(from: "brandId") +} +type Brand @table { + id: ID @primaryKey + name: String @indexed + # one-to-many relationship of brand to products of that brand + products: Product @relation(to: "brandId") +} +``` + +This relationships model can be used in queries and selects, which will automatically "join" the data from the tables. For example, you could search for products by brand name: + +```http +/Product?brand.name=Microsoft +``` + +HarperDB also now supports querying with a sort order. Multiple sort orders can be provided, with subsequent orders breaking ties. Nested selects have also been added, which also utilize joins when related records are referenced. For example: + +```http +/Product?brand.name=Microsoft&sort(price)&select(name,brand{name,size}) +``` + +See the [schema definition documentation](../../../developers/applications/defining-schemas) for more information on defining relationships, and the [REST documentation for more information on queries](../../../developers/rest). + +#### OpenAPI Specification + +A new default endpoint `GET /openapi` was added for describing endpoints configured through a GraphQL schema. + +#### Query Optimizations + +HarperDB has also made numerous improvements to query planning and execution for high-performance query results with a broader range of queries. + +#### Indexing Nulls + +New tables and indexes now support indexing null values, enabling queries by null (as well as queries for non-null values).
For example, you can query by nulls with the REST interface: + +```http +GET /Table/?attribute=null +``` + +Note that existing indexes will remain without null value indexing, and can only support indexing/querying by nulls if they are rebuilt (removed and re-added). + +#### CLI Expansion + +The HarperDB CLI now supports an expansive set of commands that execute operations from the operations API. For example, you can list users from the command line: + +```bash +harperdb list_users +``` + +#### BigInt Support + +HarperDB now supports `BigInt` attributes/values with integers (with full precision) up to 1000 bits (or 10^301). These can be used as primary keys or standard attributes, and can be used in queries or other operations. Within JSON documents, you can simply use standard JSON integer numbers with up to 300 digits, and large BigInt integers will be returned as standard JSON numbers. + +#### Local Studio Upgrade + +HarperDB has upgraded the local studio to match the same version that is offered on https://studio.harperdb.io. The local studio now has the full robust feature set of the online version. + +### MQTT + +#### mTLS Support + +HarperDB now supports mTLS-based authentication for HTTP, WebSockets, and MQTT. See the [configuration documentation for more information](../../../deployments/configuration). + +#### Single-Level Wildcards + +HarperDB's MQTT service now supports single-level wildcards (`+`), which facilitates a far greater range of subscriptions. + +#### Retain Handling + +HarperDB's MQTT service now supports the retain handling flags for subscriptions that are made using MQTT v5. + +#### CRDT + +HarperDB now supports basic conflict-free replicated data type (CRDT) updates that allow properties to be individually updated and merged when separate properties are updated on different threads or nodes. Individual property CRDT updates are performed automatically when you update individual properties through the Resource API, and are used when making `PATCH` requests through the REST API. + +The CRDT functionality also supports explicit incrementation to merge multiple parallel incrementation requests with proper summing. See the [Resource API for more information](../../reference/resource). + +#### Configuration Improvements + +The configuration has improved support for detecting port conflicts and handling paths for fastify routes, and now includes support for specifying a heap limit and TLS ciphers. See the [configuration documentation for more information](../../../deployments/configuration). + +#### Balanced Audit Log Cleanup + +Audit log cleanup has been improved to reduce resource consumption during scheduled cleanups. + +#### `export_*` support for `search_by_conditions` + +The `export_local` and `export_to_s3` operations now support `search_by_conditions` as one of the allowed search operators. + +### Storage Performance Improvements + +Significant improvements were made to the handling of free space to decrease free-space fragmentation and improve the performance of reusing free space for new data. This includes prioritizing reuse of recently released free space for better memory/caching utilization. + +#### Compact Database + +In addition to storage improvements, HarperDB now includes functionality for [compacting a database](../../../deployments/harper-cli) (while offline), which can be used to eliminate all free space to reset any fragmentation. + +#### Compression + +Compression is now enabled by default for all records over 4KB.
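+ +As a sketch of how this would be configured (assuming the `storage.compression` flag described in the configuration reference; verify the key name there before relying on it), the default can be toggled in `harperdb-config.yaml`: + +```yaml +storage: + compression: true # 4.3 default; applies to records over 4KB +```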
+ +To learn more about how to configure compression, visit [configuration](https://docs.harperdb.io/docs/v/4.3/deployments/configuration). diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.1.md new file mode 100644 index 00000000..e583d175 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.1.md @@ -0,0 +1,11 @@ +--- +title: 4.3.1 +sidebar_position: 59698 +--- + +### HarperDB 4.3.1 +3/25/2024 + +* Fix Fastify warning about responseTime usage +* Add access to the MQTT topic in the context +* Fix for ensuring local NATS streams are created diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.10.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.10.md new file mode 100644 index 00000000..bd286e90 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.10.md @@ -0,0 +1,12 @@ +--- +title: 4.3.10 +sidebar_position: 59689 +--- + +### HarperDB 4.3.10 +5/5/2024 + +* Provide a `data` property on the request/context with deserialized data from the request body for any request, including methods that don't typically have a request body +* Ensure that CRDTs are not double-applied after committing a transaction +* Delete the MQTT will message after publishing, even if it fails to publish +* Improve transaction retry logic to use async non-optimistic transactions after multiple retries \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.11.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.11.md new file mode 100644 index 00000000..df2cc2fb --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.11.md @@ -0,0 +1,10 @@ +--- +title: 4.3.11 +sidebar_position: 59688 +--- + +### HarperDB 4.3.11 +5/15/2024 + +* Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS +* Fix warning in Node v22 \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.12.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.12.md new file mode 100644 index 00000000..c4344da9 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.12.md @@ -0,0 +1,10 @@ +--- +title: 4.3.12 +sidebar_position: 59687 +--- + +### HarperDB 4.3.12 +5/16/2024 + +* Fix for handling ciphers in multiple certificates +* Allow each certificate config to have multiple hostnames \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.13.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.13.md new file mode 100644 index 00000000..7152f231 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.13.md @@ -0,0 +1,11 @@ +--- +title: 4.3.13 +sidebar_position: 59686 +--- + +### HarperDB 4.3.13 +5/22/2024 + +* Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available +* Fix for a memory leak when a node is down and consumers are trying to reconnect +* Faster cross-thread notification mechanism for transaction events \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.14.md
b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.14.md new file mode 100644 index 00000000..8374b138 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.14.md @@ -0,0 +1,9 @@ +--- +title: 4.3.14 +sidebar_position: 59685 +--- + +### HarperDB 4.3.14 +5/24/2024 + +* Fix application of ciphers to multi-certificate TLS configuration \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.15.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.15.md new file mode 100644 index 00000000..5bbb2304 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.15.md @@ -0,0 +1,10 @@ +--- +title: 4.3.15 +sidebar_position: 59684 +--- + +### HarperDB 4.3.15 +5/29/2024 + +* Add support for wildcards in hostnames for SNI +* Properly apply cipher settings on multiple TLS configurations \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.16.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.16.md new file mode 100644 index 00000000..b3b198d8 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.16.md @@ -0,0 +1,10 @@ +--- +title: 4.3.16 +sidebar_position: 59683 +--- + +### HarperDB 4.3.16 +6/3/2024 + +* Properly shim legacy TLS configuration with new multi-certificate support +* Show the changed filenames when an application is reloaded \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.17.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.17.md new file mode 100644 index 00000000..6cebb30b --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.17.md @@ -0,0 +1,14 @@ +--- +title: 4.3.17 +sidebar_position: 59682 +--- + +### HarperDB 4.3.17 +6/13/2024 + +* Add MQTT analytics of incoming messages, separated by QoS level +* Ensure that any installed `harperdb` package in components is relinked to the running harperdb. +* Upgrade storage to more efficiently avoid storage growth +* Fix to improve database metrics in system_information +* Fix for pathing on Windows with extension modules +* Add ability to define a range of listening threads \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.18.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.18.md new file mode 100644 index 00000000..7de1ca2d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.18.md @@ -0,0 +1,9 @@ +--- +title: 4.3.18 +sidebar_position: 59681 +--- + +### HarperDB 4.3.18 +6/18/2024 + +* Immediately terminate an MQTT connection when there is a keep-alive timeout.
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.19.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.19.md new file mode 100644 index 00000000..ed2782da --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.19.md @@ -0,0 +1,11 @@ +--- +title: 4.3.19 +sidebar_position: 59680 +--- + +### HarperDB 4.3.19 +7/2/2024 + +* Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized. +* Ensure that deploying components empties the target directory for a clean installation and expansion of a `package` sub-directory. +* Ensure that we do not double-load components that are referenced by symlink from node_modules and in the components directory. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.2.md new file mode 100644 index 00000000..7a967e98 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.2.md @@ -0,0 +1,15 @@ +--- +title: 4.3.2 +sidebar_position: 59697 +--- + +### HarperDB 4.3.2 +3/29/2024 + +* Clone node updates to individually clone missing parts +* Fixes for publishing OpenShift container +* Increase purge stream timeout +* Fixed declaration of analytics schema so queries work before a restart +* Fix for iterating queries when deleted records exist +* LMDB stability upgrade +* Fix for cleanup of last will in MQTT \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.20.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.20.md new file mode 100644 index 00000000..68a18912 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.20.md @@ -0,0 +1,17 @@ +--- +title: 4.3.20 +sidebar_position: 59679 +--- + +### HarperDB 4.3.20 +7/11/2024 + +* The restart_service operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) +* Disable Nagle's algorithm for TCP connections to improve performance +* Append Server-Timing header if a fastify route has already added one +* Avoid symlinking the harperdb directory to itself +* Fix for deleting an empty database +* Upgrade ws and pm2 packages for security vulnerabilities +* Improved TypeScript definitions for Resource and Context. +* The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from source +* Better error reporting of MQTT parsing errors and termination of connections for compliance diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.21.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.21.md new file mode 100644 index 00000000..b8c22de5 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.21.md @@ -0,0 +1,13 @@ +--- +title: 4.3.21 +sidebar_position: 59678 +--- + +### HarperDB 4.3.21 +8/21/2024 + +* Fixed an issue with iterating/serializing query results with a `limit`. +* Fixed an issue that was preventing the caching of structured records in memory. +* Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context`.
+* Fixed logging warnings about license limits after a license is updated. +* Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.22.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.22.md new file mode 100644 index 00000000..92f1da33 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.22.md @@ -0,0 +1,14 @@ +--- +title: 4.3.22 +sidebar_position: 59677 +--- + +### HarperDB 4.3.22 +9/6/2024 + +* Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions +* Allow .extension in URL paths to indicate both preferred encoding and decoding +* Added support for multi-part ids in query parameters +* Limit describe calls by time before using statistical sampling +* Proper cleanup of a transaction when it is aborted due to running out of available read transactions +* Updates to release/builds \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.23.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.23.md new file mode 100644 index 00000000..8dd47c25 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.23.md @@ -0,0 +1,11 @@ +--- +title: 4.3.23 +sidebar_position: 59676 +--- + +### HarperDB 4.3.23 +9/12/2024 + +* Avoid long-running read transactions on subscription catch-ups +* Reverted change to setting default certificate for IP address only +* Better handling of last-will messages on startup \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.24.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.24.md new file mode 100644 index 00000000..ef4933ea --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.24.md @@ -0,0 +1,9 @@ +--- +title: 4.3.24 +sidebar_position: 59675 +--- + +### HarperDB 4.3.24 +9/12/2024 + +* Fix for querying for large strings (over 255 characters) \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.25.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.25.md new file mode 100644 index 00000000..387a2588 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.25.md @@ -0,0 +1,12 @@ +--- +title: 4.3.25 +sidebar_position: 59674 +--- + +### HarperDB 4.3.25 +9/24/2024 + +* Add analytics for replication latency +* Fix iteration issue over asynchronous joined queries +* Local studio fix for loading applications in insecure context (HTTP) +* Local studio fix for loading configuration tab \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.26.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.26.md new file mode 100644 index 00000000..d910120c --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.26.md @@ -0,0 +1,10 @@ +--- +title: 4.3.26 +sidebar_position: 59673 +--- + +### HarperDB 4.3.26 +9/27/2024 + +* Fixed a security issue that allowed users to bypass access controls with the operations API +* Previously, expiration
handling was limited to tables with a source, but now it can be applied to any table \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.27.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.27.md new file mode 100644 index 00000000..ca8352d3 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.27.md @@ -0,0 +1,13 @@ +--- +title: 4.3.27 +sidebar_position: 59672 +--- + +### HarperDB 4.3.27 +10/2/2024 + +* Fixed handling of HTTP upgrades with a Connection header that does not use Upgrade as the sole value (for Firefox) +* Added metrics for requests by status code +* Properly remove attributes from the stored metadata when removed from the GraphQL schema +* Fixed a regression in clustering retrieval of schema description +* Fix attribute validation/handling to ensure that sequential ids can be assigned with insert/upsert operations \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.28.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.28.md new file mode 100644 index 00000000..fdba3828 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.28.md @@ -0,0 +1,11 @@ +--- +title: 4.3.28 +sidebar_position: 59671 +--- + +### HarperDB 4.3.28 +10/3/2024 + +* Tolerate users with no role when building the NATS config +* Change metrics for requests by status code to be prefixed with "response_" +* Log error `cause`, and other properties, when available. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.29.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.29.md new file mode 100644 index 00000000..c1f533fd --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.29.md @@ -0,0 +1,16 @@ +--- +title: 4.3.29 +sidebar_position: 59670 +--- + +### HarperDB 4.3.29 +10/7/2024 + +* Avoid unnecessary cookie session creation without explicit login +* Added support for caching directives in the operations API +* Fixed issue with creating metadata for a table with no primary key +* Local studio upgrade: + * Added support for "cache only" mode to view table data without origin resolution + * Added partial support for cookie-based authentication + * Added support for browsing tables with no primary key + * Improved performance for sorting tables diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.3.md new file mode 100644 index 00000000..52d7ebde --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.3.md @@ -0,0 +1,9 @@ +--- +title: 4.3.3 +sidebar_position: 59696 +--- + +### HarperDB 4.3.3 +4/01/2024 + +* Improve MQTT logging by properly logging auth failures and disconnections diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.30.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.30.md new file mode 100644 index 00000000..70c10852 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.30.md @@ -0,0 +1,9 @@ +--- +title: 4.3.30 +sidebar_position: 59669 +--- + +### HarperDB 4.3.30 +10/9/2024 + +* Properly assign the transaction timestamp to writes from cache resolutions
(ensuring that latencies can be calculated on replicating nodes) diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.31.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.31.md new file mode 100644 index 00000000..097726ac --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.31.md @@ -0,0 +1,11 @@ +--- +title: 4.3.31 +sidebar_position: 59668 +--- + +### HarperDB 4.3.31 +10/10/2024 + +* Reset the restart limit for manual restarts to ensure that the NATS process will continue to restart after more than 10 manual restarts +* Only apply caching directives (from headers) to tables/resources that are configured as caching tables sourced from another resource +* Catch/tolerate errors on serializing objects for logging diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.32.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.32.md new file mode 100644 index 00000000..ee5da648 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.32.md @@ -0,0 +1,11 @@ +--- +title: 4.3.32 +sidebar_position: 59667 +--- + +### HarperDB 4.3.32 +10/16/2024 + +* Fix a memory leak when cluster_network closes a hub connection +* Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic +* Record analytics and server-timing header even when cache resolution fails diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.33.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.33.md new file mode 100644 index 00000000..271373ef --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.33.md @@ -0,0 +1,9 @@ +--- +title: 4.3.33 +sidebar_position: 59666 +--- + +### HarperDB 4.3.33 +10/24/2024 + +* Change the default maximum length for a fastify route parameter from 100 to 1000 characters.
diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.34.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.34.md new file mode 100644 index 00000000..1071c273 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.34.md @@ -0,0 +1,9 @@ +--- +title: 4.3.34 +sidebar_position: 59665 +--- + +### HarperDB 4.3.34 +10/24/2024 + +* lmdb-js upgrade diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.35.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.35.md new file mode 100644 index 00000000..1811732b --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.35.md @@ -0,0 +1,10 @@ +--- +title: 4.3.35 +sidebar_position: 59664 +--- + +### HarperDB 4.3.35 +11/12/2024 + +* Upgrades for supporting Node.js V23 +* Fix for handling a change in the schema for nested data structures diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.36.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.36.md new file mode 100644 index 00000000..b2db5bd7 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.36.md @@ -0,0 +1,9 @@ +--- +title: 4.3.36 +sidebar_position: 59663 +--- + +### HarperDB 4.3.36 +11/14/2024 + +* lmdb-js upgrade for better free-space management diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.37.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.37.md new file mode 100644 index 00000000..57e23f5d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.37.md @@ -0,0 +1,9 @@ +--- +title: 4.3.37 +sidebar_position: 59662 +--- + +### HarperDB 4.3.37 +12/6/2024 + +* lmdb-js upgrade for preventing crashes with shared user buffers diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.38.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.38.md new file mode 100644 index 00000000..640f3620 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.38.md @@ -0,0 +1,9 @@ +--- +title: 4.3.38 +sidebar_position: 59661 +--- + +### HarperDB 4.3.38 +1/10/2025 + +* Fixes for audit log cleanup diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.4.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.4.md new file mode 100644 index 00000000..f50f1bb6 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.4.md @@ -0,0 +1,10 @@ +--- +title: 4.3.4 +sidebar_position: 59695 +--- + +### HarperDB 4.3.4 +4/9/2024 + +* Fixed a buffer overrun issue with decompressing compressed data +* Better keep-alive of transactions with long-running queries \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.5.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.5.md new file mode 100644 index 00000000..40d030e5 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.5.md @@ -0,0 +1,9 @@ +--- +title: 4.3.5 +sidebar_position: 59694 +--- + +### HarperDB 4.3.5 +4/10/2024 + +* Fixed a buffer overrun issue with decompressing compressed data \ No newline
at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.6.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.6.md new file mode 100644 index 00000000..92b28286 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.6.md @@ -0,0 +1,13 @@ +--- +title: 4.3.6 +sidebar_position: 59693 +--- + +### HarperDB 4.3.6 +4/12/2024 + +* Fixed parsing of dates from epoch millisecond times in queries +* Fixed CRDT incrementation of different data types +* Adjustments to text/plain content type q-value handling +* Fixed parsing of passwords with a colon +* Added MQTT events for connections, authorization, and disconnections \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.7.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.7.md new file mode 100644 index 00000000..8f45995a --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.7.md @@ -0,0 +1,13 @@ +--- +title: 4.3.7 +sidebar_position: 59692 +--- + +### HarperDB 4.3.7 +4/16/2024 + +* Fixed transaction handling to stay open during long compaction operations +* Fixed handling of sorting on non-indexed attributes +* Storage stability improvements +* Fixed authentication/authorization of WebSocket connections and use of cookies +* Fixes for clone node operations \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.8.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.8.md new file mode 100644 index 00000000..cd0fe88e --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.8.md @@ -0,0 +1,13 @@ +--- +title: 4.3.8 +sidebar_position: 59691 +--- + +### HarperDB 4.3.8 +4/26/2024 + +* Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within the keep-alive window) +* Improved handling of write queue timeouts, with configurability +* Fixed a memory leak that can occur with NATS reconnections after heartbeat misses +* Fixed a bug in clone node with a null port +* Add error events to MQTT events system \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.9.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.9.md new file mode 100644 index 00000000..dca6a92f --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.3.9.md @@ -0,0 +1,9 @@ +--- +title: 4.3.9 +sidebar_position: 59690 +--- + +### HarperDB 4.3.9 +4/30/2024 + +* lmdb-js upgrade \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.0.md new file mode 100644 index 00000000..f4e0da94 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.0.md @@ -0,0 +1,60 @@ +--- +title: 4.4.0 +sidebar_position: 59599 +--- + +# 4.4.0 + +#### HarperDB 4.4.0 + +10/14/2024 + +### Native Replication + +HarperDB has a completely [new native replication system](../../../developers/replication/) which is faster, more efficient, more secure, and more reliable than the previous replication system.
The new system (codenamed "Plexus") uses direct WebSocket connections between servers with highly optimized encoding and is driven by directly tracking the audit/transaction log for efficient and flexible data transfer. This replication has improved resilience, with the ability to reach consensus consistency through cross-node catch-up when a node goes down. Network connections can be performed over the existing operations API port or a separate port, for improved configurability. + +The native replication system is much easier to configure, with multiple options for authentication and security, including PKI/mTLS security that is highly robust and easy to use in conjunction with existing PKI certificates. Replication can be configured through explicit subscriptions or for automated replication of all data in a database. With automated replication, gossiping is used to automatically discover and connect to other nodes in the cluster. + +#### Sharding + +The new replication system also includes provisional support for [sharding](../../../developers/replication/sharding). This sharding mechanism paves the way for greater scalability and performance, by allowing data to be distributed across multiple nodes. + +#### Replicated Operations + +Certain operations can now be replicated across the cluster, including the deployment and management of components. This allows for a more seamless experience when managing a cluster of HarperDB instances. Restarts can also be "replicated", and if used, will perform a rolling restart of all the nodes in a cluster. + +### Computed Properties + +Computed properties allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value. For example, you could have a computed property for a full name based on first and last, or age/duration based on a date. Computed properties are also foundational for custom indexes. See the [schema documentation](../../../../developers/applications/defining-schemas), [Resource API](../../reference/resource), and our blog post on [computed properties](https://www.harperdb.io/development/tutorials/how-to-create-custom-indexes-with-computed-properties) for more information. + +### Custom Indexing + +Custom indexes can now be defined using computed properties to allow for unlimited possibilities of indexing, including composite, full-text, and vector indexing (a short schema sketch follows below). Again, see the [schema documentation](../../../../developers/applications/defining-schemas) for more information. + +### Native Graph Support + +HarperDB now includes provisional support for native [GraphQL querying functionality](../../reference/graphql). This allows for querying of graph data using GraphQL syntax. This is provisional and some APIs may be updated in the future. + +### Dynamic Certificate Management + +Certificates are now stored in system tables and can be dynamically managed. Certificates can be added, replaced, and deleted without restarting HarperDB. This includes both standard certificates and certificate authorities, as well as private keys (private keys are not stored in a table; they are securely stored in a file). + +#### Status Report on Startup + +On startup, HarperDB will now print out an informative status of all running services and the ports they are listening on.
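+ +As a sketch of the computed property and custom indexing features described above: the `@computed(from: ...)` directive form used here is an assumption for illustration; see the linked schema documentation and blog post for the definitive syntax. + +```graphql +# Hypothetical schema: totalPrice is derived from other attributes rather than stored, +# and indexing the computed attribute creates a custom index. +type Product @table { + id: ID @primaryKey + price: Float + tax: Float + totalPrice: Float @computed(from: "price + tax") @indexed +} +```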
+ +#### Support for Response object + +Resource methods can now return a `Response` object (or an object with `headers` and `status`) to allow for more control over the response. + +### Auto-incrementing Primary Keys + +Primary keys can now be auto-incrementing, allowing for automatic generation of numeric primary keys on insert/creation. Primary keys defined with `ID` or `String` will continue to use GUIDs for auto-assigned primary keys, which occurs on insert or creation if the primary key is not provided. However, for keys that are defined as `Any`, `Int`, or `Long`, the primary key will be assigned using auto-incrementation. This is significantly more efficient than GUIDs since the key only requires 8 bytes of storage instead of 31 bytes, and doesn't require random number generation. + +#### Developer/Production Mode for Configuration + +When using interactive installation (when configuration is not provided through arguments or env vars), HarperDB now provides an option for developer or production mode, with a set of default configuration values for each mode better suited to development or production environments. + +#### Export by Protocol + +Exported resources can be configured to be specifically exported by protocol (REST, MQTT, etc.) for more granular control over what is exported where. diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.1.md new file mode 100644 index 00000000..80fac940 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.1.md @@ -0,0 +1,12 @@ +--- +title: 4.4.1 +sidebar_position: 59598 +--- + +### HarperDB 4.4.1 +10/17/2024 + +* Fix issue where non-RSA keys were not being parsed correctly on startup.
+* Fix a memory leak when cluster_network closes a hub connection +* Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic +* Record analytics and server-timing header even when cache resolution fails \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.10.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.10.md new file mode 100644 index 00000000..328a694a --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.10.md @@ -0,0 +1,9 @@ +--- +title: 4.4.10 +sidebar_position: 59589 +--- + +### HarperDB 4.4.10 +12/17/2024 + +* Fix for deploying packages and detecting the node_modules directory \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.11.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.11.md new file mode 100644 index 00000000..6f5d7215 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.11.md @@ -0,0 +1,10 @@ +--- +title: 4.4.11 +sidebar_position: 59588 +--- + +### HarperDB 4.4.11 +12/18/2024 + +* Fix for initial certificate creation on upgrade +* Docker build fix \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.12.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.12.md new file mode 100644 index 00000000..82c09692 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.12.md @@ -0,0 +1,10 @@ +--- +title: 4.4.12 +sidebar_position: 59587 +--- + +### HarperDB 4.4.12 +12/19/2024 + +* Move components installed by reference into hdb/components for consistency and compatibility with Next.js +* Use npm install --force to ensure modules are installed \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.13.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.13.md new file mode 100644 index 00000000..681fc21d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.13.md @@ -0,0 +1,15 @@ +--- +title: 4.4.13 +sidebar_position: 59586 +--- + +### HarperDB 4.4.13 +1/2/2025 + +* Fix for not using requestCert if the port doesn't need replication +* Fix for applying timeouts to the HTTP server for older Node versions +* Updates for different replication configuration settings, including sharding and replication using stored credentials +* Mitigate crashes due to GC'ed shared array buffers +* Fix for error handling with CLI failures +* Updated dependencies +* Fix to allow securePort to be set on authentication \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.14.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.14.md new file mode 100644 index 00000000..48103afe --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.14.md @@ -0,0 +1,12 @@ +--- +title: 4.4.14 +sidebar_position: 59585 +--- + +### HarperDB 4.4.14 +1/3/2025 + +* Fix for starting HTTP server if headersTimeout is omitted in the configuration +* Fix for avoiding ping timeouts for large/long-duration WS messages between nodes +* Don't report errors for components
that only use a directory +* Add flag for disabling WebSocket on the REST component \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.15.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.15.md new file mode 100644 index 00000000..ec4ac263 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.15.md @@ -0,0 +1,11 @@ +--- +title: 4.4.15 +sidebar_position: 59584 +--- + +### HarperDB 4.4.15 +1/8/2025 + +* Fix for managing the state of replication sequences for each node +* Fix for better concurrency with ongoing replication +* Fix for accessing audit log entries \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.16.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.16.md new file mode 100644 index 00000000..3e90a9b1 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.16.md @@ -0,0 +1,15 @@ +--- +title: 4.4.16 +sidebar_position: 59583 +--- + +### HarperDB 4.4.16 +1/22/2025 + +* Fix for cleaning up old audit entries and associated deletion entries +* Allow CLI operations to be run when cloning is enabled +* Report table size in describe operations +* Fix for cleaning up symlinks when dropping components +* Fix for enumerating components when symlinks are used +* Add an option for using a specific installation command with deploys +* Add an API for registering an HTTP upgrade listener with `server.upgrade` \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.17.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.17.md new file mode 100644 index 00000000..788b9810 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.17.md @@ -0,0 +1,12 @@ +--- +title: 4.4.17 +sidebar_position: 59582 +--- + +### HarperDB 4.4.17 +1/29/2025 + +* Provide statistics on the size of the audit log store +* Fix handling of symlinks to the HarperDB package to avoid NPM errors in restricted containers +* Add option for rolling/consecutive restarts for deployments +* Fix for enabling root CAs for replication authorization \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.18.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.18.md new file mode 100644 index 00000000..cf341732 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.18.md @@ -0,0 +1,11 @@ +--- +title: 4.4.18 +sidebar_position: 59581 +--- + +### HarperDB 4.4.18 +1/29/2025 + +* Add option for disabling full table copy in replication +* Add option for startTime in route configuration +* Add/fix option to deploy with a package from the CLI \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.19.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.19.md new file mode 100644 index 00000000..53d42bb8 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.19.md @@ -0,0 +1,12 @@ +--- +title: 4.4.19 +sidebar_position: 59580 +--- + +### HarperDB 4.4.19 +2/4/2025 + +* LMDB upgrade for free-list verification on commit +* Add check to avoid compacting the database multiple times with
compactOnStart +* Fix handling of denied/absent subscription +* Add support for including symlinked directories in packaging a deployed component \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.2.md new file mode 100644 index 00000000..6137d48a --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.2.md @@ -0,0 +1,9 @@ +--- +title: 4.4.2 +sidebar_position: 59597 +--- + +### HarperDB 4.4.2 +10/18/2024 + +* Republish of 4.4.1 with Git merge correction. \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.20.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.20.md new file mode 100644 index 00000000..845129ca --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.20.md @@ -0,0 +1,9 @@ +--- +title: 4.4.20 +sidebar_position: 59579 +--- + +### HarperDB 4.4.20 +2/11/2025 + +* LMDB upgrade for improved handling of page boundaries with free-space lists diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.21.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.21.md new file mode 100644 index 00000000..74d653bc --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.21.md @@ -0,0 +1,11 @@ +--- +title: 4.4.21 +sidebar_position: 59578 +--- + +### HarperDB 4.4.21 +2/25/2025 + +* Fix for saving audit log entries for large keys (> 1KB) +* Security fix for handling missing passwords +* Skip bin links for NPM installation to avoid access issues \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.22.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.22.md new file mode 100644 index 00000000..85ae1895 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.22.md @@ -0,0 +1,9 @@ +--- +title: 4.4.22 +sidebar_position: 59577 +--- + +### HarperDB 4.4.22 +3/5/2025 + +* Add new http configuration option `corsAccessControlAllowHeaders` \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.23.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.23.md new file mode 100644 index 00000000..42e37e0c --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.23.md @@ -0,0 +1,10 @@ +--- +title: 4.4.23 +sidebar_position: 59576 +--- + +### HarperDB 4.4.23 +3/7/2025 + +* Fix for subscriptions to children of segmented id +* Fix for better error reporting on NPM failures \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.24.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.24.md new file mode 100644 index 00000000..dbdf7972 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.24.md @@ -0,0 +1,10 @@ +--- +title: 4.4.24 +sidebar_position: 59575 +--- + +### HarperDB 4.4.24 +3/10/2025 + +* Use process.exit(0) to restart when enabled by env var +* Reset the cwd on thread restart \ No newline at end of file diff --git 
a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.3.md new file mode 100644 index 00000000..e91428c4 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.3.md @@ -0,0 +1,13 @@ +--- +title: 4.4.3 +sidebar_position: 59596 +--- + +### HarperDB 4.4.3 +10/25/2024 + +* Fix for notification of records through classes that override get for multi-tier caching +* Fix for CLI operations +* Support for longer route parameters in Fastify routes +* Fix for accessing `harperdb` package/module from user threads +* Improvements to clone node for cloning without credentials \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.4.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.4.md new file mode 100644 index 00000000..8e6a0c48 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.4.md @@ -0,0 +1,11 @@ +--- +title: 4.4.4 +sidebar_position: 59595 +--- + +### HarperDB 4.4.4 +11/4/2024 + +* Re-introduce declarative roles and permissions +* Fix for OpenAPI endpoint +* Fix for exports of `harperdb` package/module \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.5.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.5.md new file mode 100644 index 00000000..f075ea02 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.5.md @@ -0,0 +1,15 @@ +--- +title: 4.4.5 +sidebar_position: 59594 +--- + +### HarperDB 4.4.5 +11/15/2024 + +* Fix for DoS vulnerability in large headers with cache-control and replication headers +* Fix for handling a change in the schema type for sub-fields in a nested object +* Add support for content type handlers to return iterators +* Fix for session management with a custom authentication handler +* Updates for Node.js V23 compatibility +* Fix for sorting on nested properties +* Fix for querying on not_equal to a null with object values \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.6.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.6.md new file mode 100644 index 00000000..2d4b17b6 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.6.md @@ -0,0 +1,12 @@ +--- +title: 4.4.6 +sidebar_position: 59593 +--- + +### HarperDB 4.4.6 +11/25/2024 + +* Fix queries with only sorting applied +* Fix for handling invalidation events propagating through sources +* Expanded CLI support for deploying packages +* Support for deploying large packages \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.7.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.7.md new file mode 100644 index 00000000..e1723090 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.7.md @@ -0,0 +1,10 @@ +--- +title: 4.4.7 +sidebar_position: 59592 +--- + +### HarperDB 4.4.7 +11/27/2024 + +* Allow a package to deploy its own modules +* Fix for preventing double-sourcing of resources \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.8.md
b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.8.md new file mode 100644 index 00000000..3bb02964 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.8.md @@ -0,0 +1,9 @@ +--- +title: 4.4.8 +sidebar_position: 59591 +--- + +### HarperDB 4.4.8 +12/2/2024 + +* Add multiple node versions of published Docker containers \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.9.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.9.md new file mode 100644 index 00000000..fa576ba9 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.4.9.md @@ -0,0 +1,13 @@ +--- +title: 4.4.9 +sidebar_position: 59590 +--- + +### HarperDB 4.4.9 +12/12/2024 + +* Change enableRootCAs to default to true +* Fixes for install and clone commands +* Add rejectUnauthorized to the CLI options +* Fixes for cloning +* Install modules in their own component when deploying a package by payload \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.0.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.0.md new file mode 100644 index 00000000..8c1818fc --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.0.md @@ -0,0 +1,78 @@ +--- +title: 4.5.0 +sidebar_position: 59499 +--- + +# 4.5.0 + +#### HarperDB 4.5.0 + +3/13/2025 + +### Blob Storage +4.5 introduces a new [Blob storage system](../../reference/blob) that is designed to efficiently handle large binary objects, with built-in support for streaming large content/media in and out of storage. This provides significantly better performance and functionality for large unstructured data, such as HTML, images, video, and other large files. Components can leverage this functionality through the JavaScript `Blob` interface, and the new `createBlob` function. Blobs are fully replicated and integrated. Harper can also coerce strings to `Blob`s (when dictated by the field type), making it feasible to use blobs for large string data, including with MQTT messaging. + +### Password Hashing Upgrade +4.5 adds two new password hashing algorithms for better security (to replace md5): +`sha256`: This is a solid general-purpose password hashing algorithm, with good security properties and excellent performance. This is the default algorithm in 4.5. +`argon2id`: This provides the highest level of security, and is the recommended algorithm for environments that do not require frequent password verification. However, it is more CPU intensive, and may not be suitable for environments with a high frequency of password verifications. + +### Resource and Storage Analytics +4.5 includes numerous new analytics for resources and storage, including page faults, context switches, free space, disk usage, and other metrics. + +#### Default Replication Port +The default port for replication has been changed from 9925 to 9933. + +### Property Forwarding +Record properties on resource instances are now accessible through standard property access syntax, regardless of whether the property was declared in a schema. Previously, only properties declared in a schema were accessible through standard property access syntax. This change allows for more consistent and intuitive access to record properties, regardless of how they were defined.
It is still recommended to declare properties in a schema for better performance and documentation. + +### Storage Reclamation +Harper now includes functionality for automatically trying to clean up and evict non-essential data when storage is running low. When free space drops below 40% (configurable), Harper will start to: +* Evict older entries from caching tables +* Evict older audit log entries +* Remove older rotated log files +These efforts will become progressively more aggressive as free space decreases. + +### Expanded Sharding Functionality +When sharding is being used, Harper can now honor write requests with residency information that will not be written to the local node's table. Harper also now allows nodes to be declaratively configured as part of a shard. + +### Certificate Revocation +Certificates can now be revoked by configuring nodes with a list of revoked certificate serial numbers. + +### Built-in `loadEnv` Component +There is a [new `loadEnv` component loader](../../../developers/components/built-in) that can be used to load environment variables from a .env file in a component. + +### Cluster Status Information +The [`cluster_status` operation](../../../developers/operations-api/clustering) now includes new statistics for replication, including the timestamps of last received transactions, sent transactions, and committed transactions. + +### Improved URL path parsing +Resources can be defined with nested paths and directly accessed by the exact path without requiring a trailing slash. The `id.property` syntax for accessing properties in URLs will only be applied to properties that are declared in a schema. This allows for URLs to generally include dots in paths without being interpreted as property access. A new [`directURLMapping` option/flag](../../../deployments/configuration) on resources allows for more direct URL path handling as well. + +### `server.authenticateUser` API +In addition to the `server.getUser` API that allows for retrieval of users by username, the `server.authenticateUser` API is now available, which will _always_ verify the user by the provided password. + +#### Improved Message Delivery +Message delivery performance has been improved. + +### HTTP/2 +HarperDB now supports HTTP/2 for all API endpoints. This can be enabled with the `http2` option in the configuration file. + +### `harperdb` symlink +Using `import from 'harperdb'` will more consistently work when directly running a component locally. + +### Transaction Reuse +By default, transactions can now be reused after calling `transaction.commit()`. + +### GraphQL configuration +The GraphQL query endpoint can be configured to listen on different ports. The GraphQL query endpoint is now also disabled by default, to avoid any conflicts. + +### Glob support for components +Glob file handling for specifying files used by components has been improved for better consistency. + +### Table.getRecordCount +`Table.getRecordCount()` is now available to get the number of records in a table. + +### Removal of record counts from REST API +Previously, the root path for a resource in the REST API would return a record count. However, this is a significant performance hazard and was never documented to exist, so this has been removed to ensure better performance and reliability. + +Note that downgrading from 4.5 to 4.4 is *not* supported.
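+ +A minimal sketch of using `server.authenticateUser` from a component (the exact signature and return value are assumptions for illustration; only the API name and its always-verify behavior come from these notes): + +```javascript +// Hypothetical login check: unlike server.getUser, authenticateUser always +// verifies the supplied password (signature assumed: username, password). +import { server } from 'harperdb'; + +export async function verifyLogin(username, password) { + const user = await server.authenticateUser(username, password); + return user != null; +} +```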
\ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.1.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.1.md new file mode 100644 index 00000000..a9aaf906 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.1.md @@ -0,0 +1,15 @@ +--- +title: 4.5.1 +sidebar_position: 59498 +--- + +### HarperDB 4.5.1 +3/18/2025 + +* Fix/implementation for sharding data that is written for cache resolution +* Add support for replication.shard in configuration for defining local node's shard id +* Fix for source map handling in stack traces +* Improved error reporting for syntax errors in component code +* Improved logging on deployment and NPM installation +* Added shard information to cluster_status +* Fix for audit entry eviction when a table is deleted \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.10.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.10.md new file mode 100644 index 00000000..7d981ff2 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.10.md @@ -0,0 +1,10 @@ +--- +title: 4.5.10 +sidebar_position: 59489 +--- + +### HarperDB 4.5.10 +5/20/2025 + +* Expose the `resources` map for being able to set and access custom resources +* Fix for cleaning up blob files that are used when a database is deleted \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.11.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.11.md new file mode 100644 index 00000000..cba2d019 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.11.md @@ -0,0 +1,10 @@ +--- +title: 4.5.11 +sidebar_position: 59488 +--- + +### HarperDB 4.5.11 +6/27/2025 + +* Fix bug (workaround Node.js bug) with assigning the ciphers to a server and applying to TLS connections +* Fix for handling TLS array when checking certificates configuration \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.12.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.12.md new file mode 100644 index 00000000..6353bfc2 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.12.md @@ -0,0 +1,13 @@ +--- +title: 4.5.12 +sidebar_position: 59487 +--- + +### HarperDB 4.5.12 +7/9/2025 + +- Fix for dynamically setting `harperdb` package symlink on deploy +- Assign shard numbers from each node's config rather than from routes +- Handle certificates without a common name, falling back to the SANs +- Properly clean up blobs that are only transiently used for replication +- Ensure that we always set up server.shards even when there are no TLS connections diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.13.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.13.md new file mode 100644 index 00000000..2b8a6149 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.13.md @@ -0,0 +1,9 @@ +--- +title: 4.5.13 +sidebar_position: 59486 +--- + +### HarperDB 4.5.13 +7/12/2025 + +- Fix cleaning out audit entries when a blob has been removed diff --git 
a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.14.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.14.md new file mode 100644 index 00000000..0ad8f235 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.14.md @@ -0,0 +1,9 @@ +--- +title: 4.5.14 +sidebar_position: 59485 +--- + +### HarperDB 4.5.14 +7/15/2025 + +- Use proper back-pressure when copying a table for initial database sync diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.15.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.15.md new file mode 100644 index 00000000..2387680d --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.15.md @@ -0,0 +1,10 @@ +--- +title: 4.5.15 +sidebar_position: 59484 +--- + +### HarperDB 4.5.15 +7/21/2025 + +- Removed the `copyTablesToCatchUp` option and instead utilized the clone node designation of the leader node to copy tables +- Ensure that skipping a large number of audit entries does not lock up the thread and cause a connection reset diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.16.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.16.md new file mode 100644 index 00000000..6874cce9 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.16.md @@ -0,0 +1,9 @@ +--- +title: 4.5.16 +sidebar_position: 59483 +--- + +### HarperDB 4.5.16 +7/30/2025 + +- Do not free/remove the shared user buffer that is used by all threads as an atomic counter for ids (for blobs and incremented ids), but retain it as a stable allocated buffer diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.2.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.2.md new file mode 100644 index 00000000..34fbe309 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.2.md @@ -0,0 +1,12 @@ +--- +title: 4.5.2 +sidebar_position: 59497 +--- + +### HarperDB 4.5.2 +3/25/2025 + +* For defined schemas, don't allow updates from remote nodes that could cause conflicts and repeated schema change requests +* New harper-chrome Docker container for accessing Chrome binaries for use with tools like Puppeteer +* Improved rolling restart handling of errors when reaching individual nodes +* Defined a cleaner operation object to avoid accidental leaking of credentials in logging \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.3.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.3.md new file mode 100644 index 00000000..b2a9313e --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.3.md @@ -0,0 +1,10 @@ +--- +title: 4.5.3 +sidebar_position: 59496 +--- + +### HarperDB 4.5.3 +4/3/2025 + +* Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key +* Fix for analytics of storage size when tables are deleted \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.4.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.4.md new file mode 100644 index 00000000..e4344e01 --- /dev/null +++
b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.4.md @@ -0,0 +1,11 @@ +--- +title: 4.5.4 +sidebar_position: 59495 +--- + +### HarperDB 4.5.4 +4/11/2025 + +* Fix for replication of (non-retained) published messages +* Make the cookie domain configurable to allow for cookies shared across sub-hostnames +* Fix for on-demand loading of shared blobs \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.5.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.5.md new file mode 100644 index 00000000..9baf3300 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.5.md @@ -0,0 +1,10 @@ +--- +title: 4.5.5 +sidebar_position: 59494 +--- + +### HarperDB 4.5.5 +4/15/2025 + +* Updates for better messaging with symlinks on Windows +* Fix for saving replicated blobs \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.6.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.6.md new file mode 100644 index 00000000..b4d304e8 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.6.md @@ -0,0 +1,11 @@ +--- +title: 4.5.6 +sidebar_position: 59493 +--- + +### HarperDB 4.5.6 +4/17/2025 + +* Fix for changing the type of the primary key attribute +* Added a new `includeExpensiveRecordCountEstimates` property to the REST component for returning record count estimates +* Fix for dropping attributes \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.7.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.7.md new file mode 100644 index 00000000..011ab48e --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.7.md @@ -0,0 +1,10 @@ +--- +title: 4.5.7 +sidebar_position: 59492 +--- + +### HarperDB 4.5.7 +4/23/2025 + +* Fix for handling buffers from replicated sharded blob records to prevent overwriting while in use +* Updated the included studio version with a fix for logging in \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.8.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.8.md new file mode 100644 index 00000000..b6cabf4e --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.8.md @@ -0,0 +1,12 @@ +--- +title: 4.5.8 +sidebar_position: 59491 +--- + +### HarperDB 4.5.8 +4/30/2025 + +* Fix MQTT subscription topics with trailing slashes to ensure they are not treated as a wildcard +* Fix the arguments that are used for the default connect/subscribe calls so they pass the second argument from connect like `connect(incomingMessages, query) -> subscribe(query)` +* Add support for replication connections using any configured certificate authorities to verify the server certificates +* Added more descriptive error messages for errors in user residency functions \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.9.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.9.md new file mode 100644 index 00000000..a4741506 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/4.5.9.md @@ -0,0 +1,9 @@ +--- +title: 4.5.9
+sidebar_position: 59490 +--- + +### HarperDB 4.5.9 +5/14/2025 + +* Remove the --no-bin-links directive for NPM that was causing dependency installs to fail \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/_category_.json b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/_category_.json new file mode 100644 index 00000000..9a7bca50 --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Tucker (Version 4)", + "position": -4 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/index.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/index.md new file mode 100644 index 00000000..7d10beac --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/index.md @@ -0,0 +1,44 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +HarperDB version 4 ([Tucker release](./tucker)) represents a major step forward in database technology. This release line includes ground-breaking architectural advancements: + +## [4.5](./4.5.0) +* Blob Storage - 4.5 introduces a new [Blob storage system](../../reference/blob). +* Password Hashing Upgrade - two new password hashing algorithms for better security (to replace MD5). +* New resource and storage analytics + +## [4.4](./4.4.0) + +* Native replication (codename "Plexus") which is faster, more efficient, more secure, and more reliable than the previous replication system and provides provisional sharding capabilities with a foundation for the future +* Computed properties that allow applications to define properties computed from other properties, enabling composite values calculated from other data stored in records without requiring actual storage of the computed value +* Custom indexing including composite, full-text indexing, and vector indexing + +## [4.3](./4.3.0) + +* Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations +* More advanced transaction support for CRDTs and storage of large integers (with BigInt) +* Better management with a new upgraded local studio and new CLI features + +## [4.2](./4.2.0) + +* New component architecture and Resource API for advanced, robust custom database application development +* Real-time capabilities through MQTT, WebSockets, and Server-Sent Events +* REST interface for intuitive, fast, and standards-compliant HTTP interaction +* Native caching capabilities for high-performance cache scenarios +* Clone node functionality + +## [4.1](./4.1.0) + +* New streaming iterators mechanism that allows query results to be delivered to clients _while_ query results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery +* New thread-based concurrency model for more efficient resource usage + +## [4.0](./4.0.0) + +* New clustering technology that delivers robust, resilient, and high-performance replication +* Major storage improvements with highly-efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities + +Did you know our release names are dedicated to employee pups?
For our fourth release, [meet Tucker!](./tucker) diff --git a/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/tucker.md b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/tucker.md new file mode 100644 index 00000000..e890cf6c --- /dev/null +++ b/site/versioned_docs/version-4.5/technical-details/release-notes/v4-tucker/tucker.md @@ -0,0 +1,11 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. + +![picture of grey and white dog](/img/v4.5/dogs/tucker.png) + +_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at Harper. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard. I have very ticklish feet and love belly rubs!_ diff --git a/site/versioned_docs/version-4.6/administration/_category_.json b/site/versioned_docs/version-4.6/administration/_category_.json new file mode 100644 index 00000000..828e0998 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Administration", + "position": 2, + "link": { + "type": "generated-index", + "title": "Administration Documentation", + "description": "Guides for managing and administering HarperDB instances", + "keywords": [ + "administration" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/administration/administration.md b/site/versioned_docs/version-4.6/administration/administration.md new file mode 100644 index 00000000..0c2c9d99 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/administration.md @@ -0,0 +1,32 @@ +--- +title: Best Practices and Recommendations +--- + +# Best Practices and Recommendations + +Harper is designed for minimal administrative effort, and with managed services these tasks are handled for you. There are, however, important things to consider when managing your own Harper servers. + +### Data Protection, Backup, and Recovery + +As a distributed database, Harper can benefit from different data protection strategies than a traditional single-server database. Several aspects of data protection and recovery should be considered: + +- Availability: As a distributed database, Harper is intrinsically built for high availability, and a cluster will continue to run even if entire servers fail. This is the first and primary defense for protecting against any downtime or data loss. Harper provides fast horizontal scaling functionality with node cloning, which makes it easy to establish high-availability clusters. +- [Audit log](./logging/audit-logging): Harper defaults to tracking data changes so malicious data changes can be found, attributed, and reverted. This provides security-level defense against data loss, allowing for fine-grained isolation and reversion of individual data without the large-scale reversion/loss of data associated with point-in-time recovery approaches. +- Snapshots: When used as a source-of-truth database for crucial data, we recommend using snapshot tools to regularly snapshot databases as a final backup/defense against data loss (this should only be used as a last resort in recovery). Harper has a [`get_backup`](../developers/operations-api/databases-and-tables#get-backup) operation, which provides direct support for making and retrieving database snapshots. An HTTP request can be used to get a snapshot. Alternatively, volume snapshot tools can be used to snapshot data at the OS/VM level. Harper can also provide scripts for replaying transaction logs from snapshots to facilitate point-in-time recovery when necessary (often customization may be preferred in certain recovery situations to minimize data loss).
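+ +For example, a minimal sketch of retrieving a snapshot over HTTP with `curl` (the host and credentials are hypothetical, and the `database` parameter assumes the default `data` database; consult the linked `get_backup` documentation for exact parameters and response format): + +``` +# Request a snapshot of the data database from the operations API and save it to a file +curl https://localhost:9925 \ + -X POST \ + -u admin:password \ + -H "Content-Type: application/json" \ + -d '{"operation": "get_backup", "database": "data"}' \ + --output data-snapshot.mdb +```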
+ +### Horizontal Scaling with Node Cloning + +Harper provides rapid horizontal scaling capabilities through [node cloning functionality described here](./cloning). + +### Monitoring + +Harper provides robust capabilities for analytics and observability to facilitate effective and informative monitoring: + +- Analytics provides statistics on usage, request counts, load, and memory usage, with historical tracking. The analytics data can be [accessed through querying](../technical-details/reference/analytics). +- A large variety of real-time statistics about load, system information, database metrics, and thread usage can be retrieved through the [`system_information` API](../developers/operations-api/system-operations). +- Information about the current cluster configuration and status can be found in the [cluster APIs](../developers/operations-api/clustering). +- Analytics and system information can easily be exported to Prometheus with our [Prometheus exporter component](https://github.com/HarperDB-Add-Ons/prometheus_exporter), making it easy to visualize and monitor Harper with Grafana. + +### Replication Transaction Logging + +Harper utilizes NATS for replication, which maintains a transaction log. See the [transaction log documentation for information on how to query this log](./logging/transaction-logging). diff --git a/site/versioned_docs/version-4.6/administration/cloning.md b/site/versioned_docs/version-4.6/administration/cloning.md new file mode 100644 index 00000000..dcea866a --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/cloning.md @@ -0,0 +1,153 @@ +--- +title: Clone Node +--- + +# Clone Node + +Clone node is a configurable node script that, when pointed at another instance of Harper, will create a clone of that instance's config and databases and set up full replication. If it is run in a location where there is no existing Harper install, it will, along with cloning, install Harper. If it is run in a location where there is another Harper instance, it will only clone config, databases and replication that do not already exist. + +Clone node is triggered when Harper is installed or started with certain environment or command line (CLI) variables set (see below). + +**Leader node** - the instance of Harper you are cloning.\ +**Clone node** - the new node which will be a clone of the leader node. + +To start a clone, run `harperdb` in the CLI with either of the following sets of variables: + +#### Environment variables + +- `HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +- `HDB_LEADER_USERNAME` - The leader node admin username. +- `HDB_LEADER_PASSWORD` - The leader node admin password. +- `REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: + +``` +HDB_LEADER_URL=https://node-1.my-domain.com:9925 REPLICATION_HOSTNAME=node-1.my-domain.com HDB_LEADER_USERNAME=... HDB_LEADER_PASSWORD=...
harperdb +``` + +#### Command line variables + +- `--HDB_LEADER_URL` - The URL of the leader node's operation API (usually port 9925). +- `--HDB_LEADER_USERNAME` - The leader node admin username. +- `--HDB_LEADER_PASSWORD` - The leader node admin password. +- `--REPLICATION_HOSTNAME` - _(optional)_ The clone's replication hostname. This value will be added to `replication.hostname` on the clone node. If this value is not set, replication will not be set up between the leader and clone. + +For example: + +``` +harperdb --HDB_LEADER_URL https://node-1.my-domain.com:9925 --REPLICATION_HOSTNAME node-1.my-domain.com --HDB_LEADER_USERNAME ... --HDB_LEADER_PASSWORD ... +``` + +Each time clone is run it will set a value `cloned: true` in `harperdb-config.yaml`. This value will prevent clone from running again. If you want to run clone again, set this value to `false`. If Harper is started with the clone variables still present and `cloned` is `true`, Harper will just start as normal. + +Clone node does not require any additional configuration apart from the variables referenced above. However, if you wish to set any configuration during cloning, this can be done by passing the config as environment/CLI variables or by cloning on top of an existing `harperdb-config.yaml` file. + +More can be found in the Harper config documentation [here](../deployments/configuration). + +### Excluding databases and components + +To set any specific (optional) clone config, including the exclusion of any databases and/or components, there is a file called `clone-node-config.yaml` that can be used. + +The file must be located in the `ROOTPATH` directory of your clone (the `hdb` directory where your clone will be installed; if the directory does not exist, create it and add the file to it). + +The config available in `clone-node-config.yaml` is: + +```yaml +databaseConfig: + excludeDatabases: + - database: null + excludeTables: + - database: null + table: null +componentConfig: + exclude: + - name: null +``` + +_Note: only include the configuration that you are using. If no clone config file is provided, nothing will be excluded, unless it already exists on the clone._ + +`databaseConfig` - Set any databases or tables that you wish to exclude from cloning. + +`componentConfig` - Set any components that you do not want cloned. Clone node will not clone the component code; it will only clone the component reference that exists in the leader's harperdb-config file. + +### Cloning configuration + +Clone node will not clone any configuration that is classed as unique to the leader node. This includes `replication.hostname`, `replication.url`, `clustering.nodeName`, `rootPath` and any other path-related values, for example `storage.path`, `logging.root`, `componentsRoot`, and any authentication certificate/key paths. + +### Cloning the system database + +Harper uses a database called `system` to store operational information. Clone node will only clone the user and role tables from this database. It will also set up replication on these tables, which means that any existing and future users and roles that are added will be replicated throughout the cluster. + +Cloning the user and role tables means that once clone node is complete, the clone will share the same login credentials as the leader. + +### Replication + +If clone is run with the `REPLICATION_HOSTNAME` variable set, a fully replicating clone will be created. + +If any databases are excluded from the clone, replication will not be set up on these databases.
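+ +For example, a minimal sketch of a filled-in `clone-node-config.yaml` (the database, table, and component names here are hypothetical) that skips one database, one table, and one component; as noted above, any excluded database is also excluded from replication: + +```yaml +databaseConfig: + excludeDatabases: + - database: analytics + excludeTables: + - database: data + table: staging_events +componentConfig: + exclude: + - name: my-dashboard +```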
+ +### JWT Keys + +If cloning with replication, the leader's JWT private and public keys will be cloned. To disable this, include `CLONE_KEYS=false` in your clone variables. + +### Cloning on top of an existing Harper instance + +Clone node will not overwrite any existing config, database or replication. It will write/clone any config, database or replication that does not exist on the node it is running on. + +An example of how this can be useful is if you want to set Harper config before the clone is created. To do this you would create a harperdb-config.yaml file in your local `hdb` root directory with the config you wish to set. Then, when clone is run, it will append the missing config to the file and install Harper with the desired config. + +Another useful example could be retroactively adding another database to an existing instance. Running clone on an existing instance could create a full clone of another database and set up replication between the database on the leader and the clone. + +### Cloning steps + +Clone node will execute the following steps when run: + +1. Look for an existing Harper install. It does this by using the default (or user provided) `ROOTPATH`. +1. If an existing instance is found it will check for a `harperdb-config.yaml` file and search for the `cloned` value. If the value exists and is `true`, clone will skip the clone logic and start Harper. +1. Clone harperdb-config.yaml values that don't already exist (excluding values unique to the leader node). +1. Fully clone any databases that don't already exist. +1. If classed as a "fresh clone", install Harper. An instance is classed as a fresh clone if there is no system database. +1. If `REPLICATION_HOSTNAME` is set, set up replication between the leader and clone. +1. Clone is complete, start Harper. + +### Cloning with Docker + +To run clone inside a container, add the environment variables to your run command. + +For example: + +``` +docker run -d \ + -v <host-path>:/home/harperdb/hdb \ + -e HDB_LEADER_PASSWORD=password \ + -e HDB_LEADER_USERNAME=admin \ + -e HDB_LEADER_URL=https://1.123.45.6:9925 \ + -e REPLICATION_HOSTNAME=1.123.45.6 \ + -p 9925:9925 \ + -p 9926:9926 \ + harperdb/harperdb +``` + +Clone will only run once, when you first start the container. If the container restarts, the environment variables will be ignored. diff --git a/site/versioned_docs/version-4.6/administration/compact.md b/site/versioned_docs/version-4.6/administration/compact.md new file mode 100644 index 00000000..1a71db14 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/compact.md @@ -0,0 +1,60 @@ +--- +title: Compact +--- + +# Compact + +Database files can grow quickly as you use them, sometimes impeding performance. Harper has multiple compact features that can be used to reduce database file size and potentially improve performance. The compact process does not compress your data; instead, it makes your database file smaller by eliminating free space and fragmentation. + +There are two options that Harper offers for compacting a database. + +_Note: Some of the storage configuration (such as compression) cannot be updated on existing databases; this is where the following options are useful.
They will create a new compressed copy of the database with any updated configuration._ + +More information on the storage configuration options can be [found here](../deployments/configuration#storage). + +### Copy compaction + +To prevent any record loss, it is recommended that Harper not be running when performing this operation. + +This will copy a Harper database with compaction. If you wish to use this new database in place of the original, you will need to move/rename it to the path of the original database. + +This command should be run in the [CLI](../deployments/harper-cli): + +```bash +harperdb copy-db <database> <destination-path> +``` + +For example, to copy the default database: + +```bash +harperdb copy-db data /home/user/hdb/database/copy.mdb +``` + +### Compact on start + +Compact on start is a more automated option that will compact **all** databases when Harper is started. Harper will not start until compact is complete. Under the hood it loops through all non-system databases, creates a backup of each one, and calls copy-db. After the copy/compaction is complete it will move the new database to where the original one is located and remove any backups. + +Compact on start is initiated by config in `harperdb-config.yaml`. + +_Note: Compact on start will switch `compactOnStart` to `false` after it has run._ + +`compactOnStart` - _Type_: boolean; _Default_: false + +`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false + +```yaml +storage: + compactOnStart: true + compactOnStartKeepBackup: false +``` + +Using CLI variables: + +```bash +--STORAGE_COMPACTONSTART true --STORAGE_COMPACTONSTARTKEEPBACKUP true +``` + +Or environment variables: + +```bash +STORAGE_COMPACTONSTART=true +STORAGE_COMPACTONSTARTKEEPBACKUP=true +``` diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/create-account.md b/site/versioned_docs/version-4.6/administration/harper-studio/create-account.md new file mode 100644 index 00000000..73eb9d97 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/create-account.md @@ -0,0 +1,27 @@ +--- +title: Create a Studio Account +--- + +# Create a Studio Account + +Start at the [Harper Studio sign up page](https://studio.harperdb.io/sign-up). + +1. Provide the following information: + - First Name + - Last Name + - Email Address + - Subdomain + + _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._ + + - Coupon Code (optional) + +1. Review the Privacy Policy and Terms of Service. +1. Click the **sign up for free** button. +1. You will be taken to a new screen to add an account password. Enter your password. + _Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character._ +1. Click the **add account password** button. + +You will receive a Studio welcome email confirming your registration. + +Note: Your email address will be used as your username and cannot be changed.
diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/enable-mixed-content.md b/site/versioned_docs/version-4.6/administration/harper-studio/enable-mixed-content.md new file mode 100644 index 00000000..40d9877d --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/enable-mixed-content.md @@ -0,0 +1,9 @@ +--- +title: Enable Mixed Content +--- + +# Enable Mixed Content + +Enabling mixed content is required in cases where you would like to connect the Harper Studio to Harper Instances via HTTP. This should not be used for production systems, but may be convenient for development and testing purposes. Doing so will allow your browser to reach HTTP traffic, which is considered insecure, through an HTTPS site like the Studio. + +A comprehensive guide is provided by Adobe [here](https://experienceleague.adobe.com/docs/target/using/experiences/vec/troubleshoot-composer/mixed-content.html). diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/index.md b/site/versioned_docs/version-4.6/administration/harper-studio/index.md new file mode 100644 index 00000000..011c5923 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/index.md @@ -0,0 +1,21 @@ +--- +title: Harper Studio +--- + +# Harper Studio + +Harper Studio is the web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface without any knowledge of the underlying Harper API. It’s free to sign up; get started today! + +[Sign up for free!](https://studio.harperdb.io/sign-up) + +Harper now includes a simplified local Studio that is packaged with all Harper installations and served directly from the instance. It can be enabled in the [configuration file](../../deployments/configuration#localstudio). This section is dedicated to the hosted Studio accessed at [studio.harperdb.io](https://studio.harperdb.io). + +--- + +## How does Studio Work? + +While Harper Studio is web-based and hosted by us, all database interactions are performed on the Harper instance the Studio is connected to. The Harper Studio loads in your browser, at which point you log in to your Harper instances. Credentials are stored in your browser cache and are not transmitted back to Harper. All database interactions are made via the Harper Operations API directly from your browser to your instance. + +## What type of instances can I manage? + +Harper Studio enables users to manage both Harper Cloud instances and privately hosted instances all from a single UI. All Harper instances feature identical behavior whether they are hosted by us or by you. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/instance-configuration.md b/site/versioned_docs/version-4.6/administration/harper-studio/instance-configuration.md new file mode 100644 index 00000000..1c3dd2d5 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/instance-configuration.md @@ -0,0 +1,108 @@ +--- +title: Instance Configuration +--- + +# Instance Configuration + +Harper instance configuration can be viewed and managed directly through the Harper Studio. Harper Cloud instances can be resized in two different ways via this page, either by modifying machine RAM or by increasing drive storage. Enterprise instances can have their licenses modified by adjusting licensed RAM.
+ +All instance configuration is handled through the **config** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +1. Click the appropriate organization that the instance belongs to. + +1. Select your desired instance. + +1. Click **config** in the instance control bar. + +_Note, the **config** page will only be available to super users and certain items are restricted to Studio organization owners._ + +## Instance Overview + +The **instance overview** panel displays the following instance specifications: + +- Instance URL + +- Applications URL + +- Instance Node Name (for clustering) + +- Instance API Auth Header (this user) + + _The Basic authentication header used for the logged-in Harper database user_ + +- Created Date (Harper Cloud only) + +- Region (Harper Cloud only) + + _The geographic region where the instance is hosted._ + +- Total Price + +- RAM + +- Storage (Harper Cloud only) + +- Disk IOPS (Harper Cloud only) + +## Update Instance RAM + +Harper Cloud instance size and Enterprise instance licenses can be modified with the following instructions. This option is only available to Studio organization owners. + +Note: For Harper Cloud instances, upgrading RAM may add additional CPUs to your instance as well. Click here to see how many CPUs are provisioned for each instance size. + +1. In the **update ram** panel at the bottom left: + - Select the new instance size. + - If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + - If you do have a credit card associated, you will be presented with the updated billing information. + - Click **Upgrade**. + +1. The instance will shut down and begin reprovisioning/relicensing itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. + +1. Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +_Note, if Harper Cloud instance reprovisioning takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._ + +## Update Instance Storage + +The Harper Cloud instance storage size can be increased with the following instructions. This option is only available to Studio organization owners. + +Note: Instance storage can only be upgraded once every 6 hours. + +1. In the **update storage** panel at the bottom left: + - Select the new instance storage size. + - If you do not have a credit card associated with your account, an **Add Credit Card To Account** button will appear. Click that to be taken to the billing screen where you can enter your credit card information before returning to the **config** tab to proceed with the upgrade. + - If you do have a credit card associated, you will be presented with the updated billing information. + - Click **Upgrade**. + +1. The instance will shut down and begin reprovisioning itself. The instance will not be available during this time. You will be returned to the instance dashboard and the instance status will show UPDATING INSTANCE. +1.
Once your instance upgrade is complete, it will appear on the instance dashboard as status OK with your newly selected instance size. + +_Note, if this process takes longer than 20 minutes, please submit a support ticket here: https://harperdbhelp.zendesk.com/hc/en-us/requests/new._ + +## Remove Instance + +The Harper instance can be deleted/removed from the Studio with the following instructions. Once this operation is started it cannot be undone. This option is only available to Studio organization owners. + +1. In the **remove instance** panel at the bottom left: + - Enter the instance name in the text box. + - The Studio will present you with a warning. + - Click **Remove**. + +1. The instance will begin deleting immediately. + +## Restart Instance + +The Harper Cloud instance can be restarted with the following instructions. + +1. In the **restart instance** panel at the bottom right: + - Enter the instance name in the text box. + - The Studio will present you with a warning. + - Click **Restart**. + +1. The instance will begin restarting immediately. + +## Instance Config (Read Only) + +A JSON preview of the instance config is available for reference at the bottom of the page. This is a read-only view and is not editable via the Studio. To make changes to the instance config, review the [configuration file documentation](../../deployments/configuration#using-the-configuration-file-and-naming-conventions). diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/instance-metrics.md b/site/versioned_docs/version-4.6/administration/harper-studio/instance-metrics.md new file mode 100644 index 00000000..eae954f1 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/instance-metrics.md @@ -0,0 +1,16 @@ +--- +title: Instance Metrics +--- + +# Instance Metrics + +The Harper Studio displays instance status and metrics on the instance status page, which can be accessed with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **status** in the instance control bar. + +Once on the instance status page you can view host system information, [Harper logs](../logging/standard-logging), and Harper Cloud alarms (if it is a cloud instance). + +_Note, the **status** page will only be available to super users._ diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/instances.md b/site/versioned_docs/version-4.6/administration/harper-studio/instances.md new file mode 100644 index 00000000..f79f577f --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/instances.md @@ -0,0 +1,146 @@ +--- +title: Instances +--- + +# Instances + +The Harper Studio allows you to administer all of your Harper instances in one place. Harper currently offers the following instance types: + +- **Harper Cloud Instance** Managed installations of Harper, what we call [Harper Cloud](../../deployments/harper-cloud/). +- **5G Wavelength Instance** Managed installations of Harper running on the Verizon network through AWS Wavelength, what we call 5G Wavelength Instances. _Note, these instances are only accessible via the Verizon network._ +- **Enterprise Instance** Any Harper installation that is managed by you.
These include instances hosted within your cloud provider accounts (for example, from the AWS or Digital Ocean Marketplaces), privately hosted instances, or instances installed locally. + +All interactions between the Studio and your instances take place directly from your browser. Harper stores metadata about your instances, which enables the Studio to display these instances when you log in. Beyond that, all traffic is routed from your browser to the Harper instances using the standard [Harper API](../../developers/operations-api/). + +## Organization Instance List + +A summary view of all instances within an organization can be viewed by clicking on the appropriate organization from the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. Each instance gets its own card. Harper Cloud and Enterprise instances are listed together. + +## Create a New Instance + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select your desired Instance Type. +1. For a Harper Cloud Instance or a Harper 5G Wavelength Instance, click **Create Harper Cloud Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This will be used to build your instance URL. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com. The Instance URL will be previewed below._ + + 1. Enter Instance Username + + _This is the username of the initial Harper instance super user._ + + 1. Enter Instance Password + + _This is the password of the initial Harper instance super user._ + + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _Harper Cloud Instances are billed based on Instance RAM; this will select the size of your provisioned instance. More on instance specs._ + + 1. Select Storage Size + + _Each instance has a mounted storage volume where your Harper data will reside. Storage is provisioned based on space and IOPS. More on IOPS impact on performance._ + + 1. Select Instance Region + + _The geographic area where your instance will be provisioned._ + + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. Your Harper Cloud instance will be provisioned in the background. Provisioning typically takes 5-15 minutes. You will receive an email notification when your instance is ready. + +## Register Enterprise Instance + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization for the instance to be created under. +1. Click the **Create New Harper Cloud Instance + Register Enterprise Instance** card. +1. Select **Register Enterprise Instance**. + 1. Fill out Instance Info. + 1. Enter Instance Name + + _This is used for descriptive purposes only._ + + 1. Enter Instance Username + + _The username of a Harper super user that is already configured in your Harper installation._ + + 1.
Enter Instance Password + + _The password of a Harper super user that is already configured in your Harper installation._ + + 1. Enter Host + + _The host to access the Harper instance. For example, `harperdb.myhost.com` or `localhost`._ + + 1. Enter Port + + _The port to access the Harper instance. Harper defaults to `9925` for HTTP and `31283` for HTTPS._ + + 1. Select SSL + + _If your instance is running over SSL, select the SSL checkbox. If not, you will need to enable mixed content in your browser to allow the HTTPS Studio to access the HTTP instance. If there are issues connecting to the instance, the Studio will display a red error message; a connectivity check like the sketch at the end of this page can help narrow down the cause._ + + 1. Click **Instance Details** to move to the next page. + 1. Select Instance Specs + 1. Select Instance RAM + + _Harper instances are billed based on Instance RAM. Selecting additional RAM will enable faster and more complex queries._ + + 1. Click **Confirm Instance Details** to move to the next page. + 1. Review your Instance Details; if there is an error, use the back button to correct it. + 1. Review the [Privacy Policy](https://harperdb.io/legal/privacy-policy/) and [Terms of Service](https://harperdb.io/legal/harperdb-cloud-terms-of-service/); if you agree, click the **I agree** radio button to confirm. + 1. Click **Add Instance**. + 1. The Harper Studio will register your instance and restart it for the registration to take effect. Your instance will be immediately available after this is complete. + +## Delete an Instance + +Instance deletion has two different behaviors depending on the instance type. + +- **Harper Cloud Instance** This instance will be permanently deleted, including all data. This process is irreversible and cannot be undone. +- **Enterprise Instance** The instance will be removed from the Harper Studio only. This does not uninstall Harper from your system, and your data will remain intact. + +An instance can be deleted as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the trash can icon. +1. Enter the instance name into the text box. + + _This is done for confirmation purposes to ensure you do not accidentally delete an instance._ + +1. Click the **Do It** button. + +## Upgrade an Instance + +Harper instances can be resized on the [Instance Configuration](./instance-configuration) page. + +## Instance Log In/Log Out + +The Studio enables you to log in and out of instances with different database users from the instance control panel. To log out of an instance: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card and click the lock icon. +1. You will immediately be logged out of the instance. + +To log in to an instance: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Identify the proper instance card (it will have an unlocked icon and a status reading PLEASE LOG IN) and click the center of the card. +1. Enter the database username. + + _The username of a Harper user that is already configured in your Harper instance._ + +1. Enter the database password. + + _The password of a Harper user that is already configured in your Harper instance._ + +1. Click **Log In**.
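+ +## Checking Instance Connectivity + +If the Studio cannot reach an instance you are registering, it can help to confirm the host, port, and credentials directly before retrying. A minimal sketch with `curl` (the host, port, and credentials here are hypothetical); any operations API call, such as `describe_all`, will verify both connectivity and authentication: + +``` +# Send a describe_all operation to the instance's operations API +curl https://harperdb.myhost.com:9925 \ + -X POST \ + -u admin:password \ + -H "Content-Type: application/json" \ + -d '{"operation": "describe_all"}' +``` + +A successful response returns a JSON description of the instance's databases; a TLS or authentication failure here will generally surface in the Studio as a connection error.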
diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/login-password-reset.md b/site/versioned_docs/version-4.6/administration/harper-studio/login-password-reset.md new file mode 100644 index 00000000..96f40020 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/login-password-reset.md @@ -0,0 +1,42 @@ +--- +title: Login and Password Reset +--- + +# Login and Password Reset + +## Log In to Your Harper Studio Account + +To log into your existing Harper Studio account: + +1. Navigate to the [Harper Studio](https://studio.harperdb.io/). +1. Enter your email address. +1. Enter your password. +1. Click **sign in**. + +## Reset a Forgotten Password + +To reset a forgotten password: + +1. Navigate to the Harper Studio password reset page. +1. Enter your email address. +1. Click **send password reset email**. +1. If the account exists, you will receive an email with a temporary password. +1. Navigate back to the Harper Studio login page. +1. Enter your email address. +1. Enter your temporary password. +1. Click **sign in**. +1. You will be taken to a new screen to reset your account password. Enter your new password. + _Passwords must be a minimum of 8 characters with at least 1 lower case character, 1 upper case character, 1 number, and 1 special character._ +1. Click the **add account password** button. + +## Change Your Password + +If you are already logged into the Studio, you can change your password through the user interface. + +1. Navigate to the Harper Studio profile page. +1. In the **password** section, enter: + - Current password. + - New password. + - New password again _(for verification)_. + +1. Click the **Update Password** button. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/manage-applications.md b/site/versioned_docs/version-4.6/administration/harper-studio/manage-applications.md new file mode 100644 index 00000000..370194ce --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/manage-applications.md @@ -0,0 +1,59 @@ +--- +title: Manage Applications +--- + +# Manage Applications + +[Harper Applications](../../developers/applications/) are enabled by default and can be configured further through the Harper Studio. It is recommended to read through the [Applications](../../developers/applications/) documentation first to gain a strong understanding of Harper Applications behavior. + +All Applications configuration and development is handled through the **applications** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the Harper Studio Organizations page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **applications** in the instance control bar. + +_Note, the **applications** page will only be available to super users._ + +## Manage Applications + +The Applications editor is not required for development and deployment, though it is a useful tool to maintain and manage your Harper Applications. The editor provides the ability to create new applications or import/deploy remote application packages. + +The left bar is the applications file navigator, allowing you to select files to edit and add/remove files and folders. By default, this view is empty because there are no existing applications. To get started, either create a new application or import/deploy a remote application. + +The right side of the screen is the file editor.
Here you can edit individual files of your application directly in the Harper Studio. + +## Things to Keep in Mind + +To learn more about developing Harper Applications, make sure to read through the [Applications](../../developers/applications/) documentation. + +When working with Applications in the Harper Studio, by default the editor will restart the Harper Applications server every time a file is saved. Note, this behavior can be turned off by toggling the `auto` toggle at the top right of the applications page. If you are constantly editing your application, it may result in errors causing the application not to run. These errors will not be visible on the application page; however, they will be available in the Harper logs, which can be found on the [status page](./instance-metrics). + +The Applications editor stores unsaved changes in cache. This means that occasionally your editor will show a discrepancy from the code that is stored and running on your Harper instance. You can tell that the code in your Studio differs if the "save" and "revert" buttons are active. To revert the cached version in your editor to the version of the file stored on your Harper instance, click the "revert" button. + +## Accessing Your Application Endpoints + +How you access your application endpoints varies with the type of endpoint you're creating. All endpoints, regardless of type, will be accessed via the [Harper HTTP port found in the Harper configuration file](../../deployments/configuration#http). The default port is `9926`, but you can verify what your instance is set to by navigating to the [instance config page](./instance-configuration) and examining the read-only JSON version of your instance's config file, looking specifically for either the `http: port: 9926` or `http: securePort: 9926` configs. If `port` is set, you will access your endpoints via `http`, and if `securePort` is set, you will access your endpoints via `https`. + +Below is a breakdown of how to access each type of endpoint. In these examples, we will use a locally hosted instance with `securePort` set to `9926`: `https://localhost:9926`. + +- **Standard REST Endpoints**\ + Standard REST endpoints are defined via the `@export` directive on tables in your schema definition. You can read more about these in the [Adding an Endpoint section of the Applications documentation](../../developers/applications/#adding-an-endpoint). Here, if we are looking to access a record with ID `1` from table `Dog` on our instance, [per the REST documentation](../../developers/rest), we could send a `GET` (or, since this is a GET, simply enter the URL in our browser) to `https://localhost:9926/Dog/1`. +- **Augmented REST Endpoints**\ + Harper Applications enable you to write [Custom Functionality with JavaScript](../../developers/applications/#custom-functionality-with-javascript) for your resources. Accessing these endpoints is identical to accessing the standard REST endpoints above, though you may have defined custom behavior in each function. Taking the example from the [Applications documentation](../../developers/applications/#custom-functionality-with-javascript), if we are looking to access the `DogWithHumanAge` example, we could send the GET to `https://localhost:9926/DogWithHumanAge/1`. +- **Fastify Routes**\ + If you need more functionality than the REST applications can provide, you can define your own custom endpoints using [Fastify Routes](../../developers/applications/#define-fastify-routes). The paths to these routes are defined via the application `config.yaml` file. You can read more about how you can customize the configuration options in the [Define Fastify Routes documentation](../../developers/applications/define-routes). By default, routes are accessed via the following pattern: `[Instance URL]:[HTTP Port]/[Project Name]/[Route URL]`. Using the example from the [Harper Application Template](https://github.com/HarperDB/application-template/), where we've named our project `application-template`, we would access the `getAll` route at `https://localhost:9926/application-template/getAll`.
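+ +Putting these together, a quick sketch of exercising each endpoint type with `curl`, using the hypothetical local instance and the example resources above: + +``` +# Standard REST endpoint (table Dog exported with @export) +curl https://localhost:9926/Dog/1 + +# Augmented REST endpoint (custom JavaScript resource) +curl https://localhost:9926/DogWithHumanAge/1 + +# Fastify route from the application-template project +curl https://localhost:9926/application-template/getAll +```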
+ +## Creating a New Application + +1. From the applications page, click the "+ app" button at the top right. +1. Click "+ Create A New Application Using The Default Template". +1. Enter a name for your project; note, project names must contain only alphanumeric characters, dashes, and underscores. +1. Click OK. +1. Your project will be available in the applications file navigator on the left. Click a file to select it for editing. + +## Editing an Application + +1. From the applications page, click the file you would like to edit from the file navigator on the left. +1. Edit the file with any changes you'd like. +1. Click "save" at the top right. Note, as mentioned above, when you save a file, the Harper Applications server will be restarted immediately. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/manage-databases-browse-data.md b/site/versioned_docs/version-4.6/administration/harper-studio/manage-databases-browse-data.md new file mode 100644 index 00000000..33482198 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/manage-databases-browse-data.md @@ -0,0 +1,123 @@ +--- +title: Manage Databases / Browse Data +--- + +# Manage Databases / Browse Data + +Manage instance databases/tables and browse data in tabular format with the following instructions: + +1. Navigate to the Harper Studio Organizations page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **browse** in the instance control bar. + +Once on the instance browse page you can view data, manage databases and tables, add new data, and more. + +## Manage Databases and Tables + +#### Create a Database + +1. Click the plus icon at the top right of the databases section. +1. Enter the database name. +1. Click the green check mark. + +#### Delete a Database + +Deleting a database is permanent and irreversible. Deleting a database removes all tables and data within it. + +1. Click the minus icon at the top right of the databases section. +1. Identify the appropriate database to delete and click the red minus sign in the same row. +1. Click the red check mark to confirm deletion. + +#### Create a Table + +1. Select the desired database from the databases section. +1. Click the plus icon at the top right of the tables section. +1. Enter the table name. +1. Enter the primary key. + + _The primary key is also often referred to as the hash attribute in the studio, and it defines the unique identifier for each row in your table._ + +1. Click the green check mark. + +#### Delete a Table + +Deleting a table is permanent and irreversible. Deleting a table removes all data within it. + +1. Select the desired database from the databases section. +1. Click the minus icon at the top right of the tables section. +1. Identify the appropriate table to delete and click the red minus sign in the same row. +1.
Click the red check mark to confirm deletion. + +## Manage Table Data + +The following section assumes you have selected the appropriate table from the database/table browser. + +#### Filter Table Data + +1. Click the magnifying glass icon at the top right of the table browser. This expands the search filters. +1. Enter your desired filter criteria. +1. The results will be filtered appropriately. + +#### Load CSV Data + +1. Click the data icon at the top right of the table browser. You will be directed to the CSV upload page where you can choose to import a CSV by URL or upload a CSV file. +1. To import a CSV by URL: + 1. Enter the URL in the **CSV file URL** textbox. + 1. Click **Import From URL**. + 1. The CSV will load, and you will be redirected back to browse table data. +1. To upload a CSV file: + 1. Click **Click or Drag to select a .csv file** (or drag your CSV file from your file browser). + 1. Navigate to your desired CSV file and select it. + 1. Click **Insert X Records**, where X is the number of records in your CSV. + 1. The CSV will load, and you will be redirected back to browse table data. + +#### Add a Record + +1. Click the plus icon at the top right of the table browser. +1. The Studio will pre-populate existing table attributes in JSON format. + + _The primary key is not included, but you can add it in and set it to your desired value. Auto-maintained fields are not included and cannot be manually set. You may enter a JSON array to insert multiple records in a single transaction._ + +1. Enter values to be added to the record. + + _You may add new attributes to the JSON; they will be reflexively added to the table._ + +1. Click the **Add New** button. + +#### Edit a Record + +1. Click the record/row you would like to edit. +1. Modify the desired values. + + _You may add new attributes to the JSON; they will be reflexively added to the table._ + +1. Click the **save icon**. + +#### Delete a Record + +Deleting a record is permanent and irreversible. If transaction logging is turned on, the delete transaction will be recorded as well as the data that was deleted. + +1. Click the record/row you would like to delete. +1. Click the **delete icon**. +1. Confirm deletion by clicking the **check icon**. + +## Browse Table Data + +The following section assumes you have selected the appropriate table from the database/table browser. + +#### Browse Table Data + +The first page of table data is automatically loaded on table selection. Paging controls are at the bottom of the table. Here you can: + +- Page left and right using the arrows. +- Type in the desired page. +- Change the page size (the number of records displayed in the table). + +#### Refresh Table Data + +Click the refresh icon at the top right of the table browser. + +#### Automatically Refresh Table Data + +Toggle the auto switch at the top right of the table browser. The table data will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/manage-instance-roles.md b/site/versioned_docs/version-4.6/administration/harper-studio/manage-instance-roles.md new file mode 100644 index 00000000..3662013c --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/manage-instance-roles.md @@ -0,0 +1,77 @@ +--- +title: Manage Instance Roles +--- + +# Manage Instance Roles + +Harper users and roles can be managed directly through the Harper Studio.
It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. + +Instance role configuration is handled through the **roles** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the Harper Studio Organizations page. + +1. Click the appropriate organization that the instance belongs to. + +1. Select your desired instance. + +1. Click **roles** in the instance control bar. + +_Note, the **roles** page will only be available to super users._ + +The _roles management_ screen consists of the following panels: + +- **super users** + + Displays all super user roles for this instance. + +- **cluster users** + + Displays all cluster user roles for this instance. + +- **standard roles** + + Displays all standard roles for this instance. + +- **role permission editing** + + Once a role is selected for editing, permissions will be displayed here in JSON format. + +_Note, when new tables are added that are not configured, the Studio will generate configuration values with permissions defaulting to `false`._ + +## Role Management + +#### Create a Role + +1. Click the plus icon at the top right of the appropriate role section. + +1. Enter the role name. + +1. Click the green check mark. + +1. Optionally toggle the **manage databases/tables** switch to specify the `structure_user` config. + +1. Configure the role permissions in the role permission editing panel. + + _Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel._ + +1. Click **Update Role Permissions**. + +#### Modify a Role + +1. Click the appropriate role from the appropriate role section. + +1. Modify the role permissions in the role permission editing panel. + + _Note, to have the Studio generate attribute permissions JSON, toggle **show all attributes** at the top right of the role permission editing panel._ + +1. Click **Update Role Permissions**. + +#### Delete a Role + +Deleting a role is permanent and irreversible. A role cannot be removed if users are associated with it. + +1. Click the minus icon at the top right of the roles section. + +1. Identify the appropriate role to delete and click the red minus sign in the same row. + +1. Click the red check mark to confirm deletion. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/manage-instance-users.md b/site/versioned_docs/version-4.6/administration/harper-studio/manage-instance-users.md new file mode 100644 index 00000000..82b06734 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/manage-instance-users.md @@ -0,0 +1,53 @@ +--- +title: Manage Instance Users +--- + +# Manage Instance Users + +Harper users and roles can be managed directly through the Harper Studio. It is recommended to read through the [users & roles documentation](../../developers/security/users-and-roles) to gain a strong understanding of how they operate. + +Instance user configuration is handled through the **users** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +1. Click the appropriate organization that the instance belongs to. + +1. Select your desired instance. + +1. Click **users** in the instance control bar.
+ +_Note, the **users** page will only be available to super users._ + +## Add a User + +Harper instance users can be added with the following instructions. + +1. In the **add user** panel on the left, enter: + - New user username. + - New user password. + - Select a role. + + _Learn more about role management here: [Manage Instance Roles](./manage-instance-roles)._ + +1. Click **Add User**. + +## Edit a User + +Harper instance users can be modified with the following instructions. + +1. In the **existing users** panel, click the row of the user you would like to edit. + +1. To change a user’s password: + 1. In the **Change user password** section, enter the new password. + 1. Click **Update Password**. + +1. To change a user’s role: + 1. In the **Change user role** section, select the new role. + 1. Click **Update Role**. + +1. To delete a user: + 1. In the **Delete User** section, type the username into the textbox. + + _This is done for confirmation purposes._ + + 1. Click **Delete User**. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/manage-replication.md b/site/versioned_docs/version-4.6/administration/harper-studio/manage-replication.md new file mode 100644 index 00000000..8987d71c --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/manage-replication.md @@ -0,0 +1,90 @@ +--- +title: Manage Replication +--- + +# Manage Replication + +Harper instance clustering and replication can be configured directly through the Harper Studio. It is recommended to read through the [clustering documentation](../../developers/clustering/) first to gain a strong understanding of Harper clustering behavior. + +All clustering configuration is handled through the **replication** page of the Harper Studio, accessed with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. + +1. Click the appropriate organization that the instance belongs to. + +1. Select your desired instance. + +1. Click **replication** in the instance control bar. + +Note, the **replication** page will only be available to super users. + +--- + +## Initial Configuration + +Harper instances do not have clustering configured by default. The Harper Studio will walk you through the initial configuration. Upon entering the **replication** screen for the first time you will need to complete the following configuration. Configurations are set in the **enable clustering** panel on the left while actions are described in the middle of the screen. It is worth reviewing the [Creating a Cluster User](../../developers/clustering/creating-a-cluster-user) document before proceeding. + +1. Enter Cluster User username. (Defaults to `cluster_user`). +1. Enter Cluster Password. +1. Review and/or Set Cluster Node Name. +1. Click **Enable Clustering**. + +At this point the Studio will restart your Harper instance, which is required for the configuration changes to take effect. + +--- + +## Manage Clustering + +Once initial clustering configuration is completed, you are presented with a clustering management screen with the following properties: + +- **connected instances** + + Displays all instances within the Studio Organization that this instance manages a connection with. + +- **unconnected instances** + + Displays all instances within the Studio Organization that this instance does not manage a connection with.
+ +- **unregistered instances** + + Displays all instances outside the Studio Organization that this instance manages a connection with. + +- **manage clustering** + + Once instances are connected, this will display clustering management options for all connected instances and all databases and tables. + +--- + +## Connect an Instance + +Harper Instances can be clustered together with the following instructions. + +1. Ensure clustering has been configured on both instances and a cluster user with identical credentials exists on both. + +1. Identify the instance you would like to connect from the **unconnected instances** panel. + +1. Click the plus icon next to the appropriate instance. + +1. If configurations are correct, all databases will sync across the cluster, then appear in the **manage clustering** panel. If there is a configuration issue, a red exclamation icon will appear; click it to learn more about what could be causing the issue. + +--- + +## Disconnect an Instance + +Harper Instances can be disconnected with the following instructions. + +1. Identify the instance you would like to disconnect from the **connected instances** panel. + +1. Click the minus icon next to the appropriate instance. + +--- + +## Manage Replication + +Subscriptions must be configured in order to move data between connected instances. Read more about subscriptions here: Creating A Subscription. The **manage clustering** panel displays a table with each row representing a channel per instance. Cells are bolded to indicate a change in the column. Publish and subscribe replication can be configured per table with the following instructions: + +1. Identify the instance, database, and table for replication to be configured. + +1. For publish, click the toggle switch in the **publish** column. + +1. For subscribe, click the toggle switch in the **subscribe** column. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/organizations.md b/site/versioned_docs/version-4.6/administration/harper-studio/organizations.md new file mode 100644 index 00000000..e36b6fa6 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/organizations.md @@ -0,0 +1,109 @@ +--- +title: Organizations +--- + +# Organizations + +Harper Studio organizations provide the ability to group Harper Cloud Instances. Organization behavior is as follows: + +- Billing occurs at the organization level to a single credit card. +- Organizations retain their own unique Harper Cloud subdomain. +- Cloud instances reside within an organization. +- Studio users can be invited to organizations to share instances. + +An organization is automatically created for you when you sign up for Harper Studio. If you only have one organization, the Studio will automatically bring you to your organization’s page. + +--- + +## List Organizations + +A summary of all organizations your user belongs to can be viewed on the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. You can navigate to this page at any time by clicking the **all organizations** link at the top of the Harper Studio. + +## Create a New Organization + +A new organization can be created as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the **Create a New Organization** card. +1.
Fill out the new organization details: + - Enter Organization Name + _This is used for descriptive purposes only._ + - Enter Organization Subdomain + _Part of the URL that will be used to identify your Harper Cloud Instances. For example, with subdomain “demo” and instance name “c1” the instance URL would be: https://c1-demo.harperdbcloud.com._ +1. Click **Create Organization**. + +## Delete an Organization + +An organization cannot be deleted until all instances have been removed. An organization can be deleted as follows: + +1. Navigate to the Harper Studio Organizations page. +1. Identify the proper organization card and click the trash can icon. +1. Enter the organization name into the text box. + + _This is done for confirmation purposes to ensure you do not accidentally delete an organization._ + +1. Click the **Do It** button. + +## Manage Users + +Harper Studio organization owners can manage users including inviting new users, removing users, and toggling ownership. + +#### Inviting a User + +A new user can be invited to an organization as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the appropriate organization card. +1. Click **users** at the top of the screen. +1. In the **add user** box, enter the new user’s email address. +1. Click **Add User**. + +Users may or may not already be Harper Studio users when adding them to an organization. If the Harper Studio account already exists, the user will receive an email notification alerting them to the organization invitation. If the user does not have a Harper Studio account, they will receive an email welcoming them to Harper Studio. + +--- + +#### Toggle a User’s Organization Owner Status + +Organization owners have full access to the organization including the ability to manage organization users, create, modify, and delete instances, and delete the organization. Users must have accepted their invitation prior to being promoted to an owner. A user’s organization owner status can be toggled as follows: + +1. Navigate to the Harper Studio Organizations page. +1. Click the appropriate organization card. +1. Click **users** at the top of the screen. +1. Click the appropriate user from the **existing users** section. +1. Toggle the **Is Owner** switch to the desired status. + +--- + +#### Remove a User from an Organization + +Users may be removed from an organization at any time. Removing a user from an organization will not delete their Harper Studio account; it will only remove their access to the specified organization. A user can be removed from an organization as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the appropriate organization card. +1. Click **users** at the top of the screen. +1. Click the appropriate user from the **existing users** section. +1. Type **DELETE** in the text box in the **Delete User** row. + + _This is done for confirmation purposes to ensure you do not accidentally delete a user._ + +1. Click **Delete User**. + +## Manage Billing + +Billing is configured per organization and will be billed to the stored credit card at appropriate intervals (monthly or annually, depending on the registered instance). Billing settings can be configured as follows: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/?redirect=/organizations) page. +1. Click the appropriate organization card. +1. Click **billing** at the top of the screen.
+ +Here organization owners can view invoices, manage coupons, and manage the associated credit card. + +_Harper billing and payments are managed via Stripe._ + +### Add a Coupon + +Coupons are applicable towards any paid tier or enterprise instance, and you can change your subscription at any time. Coupons can be added to your Organization as follows: + +1. In the coupons panel of the **billing** page, enter your coupon code. +1. Click **Add Coupon**. +1. The coupon will then be available and displayed in the coupons panel. diff --git a/site/versioned_docs/version-4.6/administration/harper-studio/query-instance-data.md b/site/versioned_docs/version-4.6/administration/harper-studio/query-instance-data.md new file mode 100644 index 00000000..3e36c7cf --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/harper-studio/query-instance-data.md @@ -0,0 +1,52 @@ +--- +title: Query Instance Data +--- + +# Query Instance Data + +SQL queries can be executed directly through the Harper Studio with the following instructions: + +1. Navigate to the [Harper Studio Organizations](https://studio.harperdb.io/organizations) page. +1. Click the appropriate organization that the instance belongs to. +1. Select your desired instance. +1. Click **query** in the instance control bar. +1. Enter your SQL query in the SQL query window. +1. Click **Execute**. + +_Please note, the Studio will execute the query exactly as entered. For example, if you attempt to `SELECT *` from a table with millions of rows, you will most likely crash your browser._ + +## Browse Query Results Set + +#### Browse Results Set Data + +The first page of results set data is automatically loaded on query execution. Paging controls are at the bottom of the table. Here you can: + +- Page left and right using the arrows. +- Type in the desired page. +- Change the page size (the number of records displayed in the table). + +#### Refresh Results Set + +Click the refresh icon at the top right of the results set table. + +#### Automatically Refresh Results Set + +Toggle the auto switch at the top right of the results set table. The results set will now automatically refresh every 15 seconds. Filters and pages will remain set for refreshed data. + +## Query History + +Query history is stored in your local browser cache. Executed queries are listed with the most recent at the top in the **query history** section. + +#### Rerun Previous Query + +- Identify the query from the **query history** list. +- Click the appropriate query. It will be loaded into the **sql query** input box. +- Click **Execute**. + +#### Clear Query History + +Click the trash can icon at the top right of the **query history** section. + +## Create Charts + +The Harper Studio includes a charting feature where you can build charts based on your specified queries. Visit the Charts documentation for more information. diff --git a/site/versioned_docs/version-4.6/administration/jobs.md b/site/versioned_docs/version-4.6/administration/jobs.md new file mode 100644 index 00000000..c487f424 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/jobs.md @@ -0,0 +1,112 @@ +--- +title: Jobs +--- + +# Jobs + +Harper Jobs are asynchronous tasks performed by the Operations API. + +## Job Summary + +Jobs use an asynchronous methodology to account for the potential of a long-running operation. For example, exporting millions of records to S3 could take some time, so the job is started and its ID is provided so you can check on its status.
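+ +For example, a hedged sketch of starting one of these jobs via the Operations API (an S3 export; the bucket, credentials, and query shown are purely illustrative, so consult the bulk operations documentation linked below for the authoritative request shape): + +``` +{ + "operation": "export_to_s3", + "format": "json", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "REGION" + }, + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.dog" + } +} +```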
+ +The job status can be **COMPLETE** or **IN_PROGRESS**. + +## Example Job Operations + +Example job operations include: + +[csv data load](../developers/operations-api/bulk-operations#csv-data-load) + +[csv file load](../developers/operations-api/bulk-operations#csv-file-load) + +[csv url load](../developers/operations-api/bulk-operations#csv-url-load) + +[import from s3](../developers/operations-api/bulk-operations#import-from-s3) + +[delete_records_before](../developers/operations-api/bulk-operations#delete-records-before) + +[export_local](../developers/operations-api/bulk-operations#export-local) + +[export_to_s3](../developers/operations-api/bulk-operations#export-to-s3) + +Example Response from a Job Operation + +``` +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` + +Whenever one of these operations is initiated, an asynchronous job is created and the response contains the ID of that job, which can be used to check on its status. + +## Managing Jobs + +To check on a job's status, use the [get_job](../developers/operations-api/jobs#get-job) operation. + +Get Job Request + +``` +{ + "operation": "get_job", + "id": "4a982782-929a-4507-8794-26dae1132def" +} +``` + +Get Job Response + +``` +[ + { + "__createdtime__": 1611615798782, + "__updatedtime__": 1611615801207, + "created_datetime": 1611615798774, + "end_datetime": 1611615801206, + "id": "4a982782-929a-4507-8794-26dae1132def", + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "start_datetime": 1611615798805, + "status": "COMPLETE", + "type": "csv_url_load", + "user": "HDB_ADMIN", + "start_datetime_converted": "2021-01-25T23:03:18.805Z", + "end_datetime_converted": "2021-01-25T23:03:21.206Z" + } +] +``` + +## Finding Jobs + +To find jobs (if the ID is not known), use the [search_jobs_by_start_date](../developers/operations-api/jobs#search-jobs-by-start-date) operation. + +Search Jobs Request + +``` +{ + "operation": "search_jobs_by_start_date", + "from_date": "2021-01-25T22:05:27.464+0000", + "to_date": "2021-01-25T23:05:27.464+0000" +} +``` + +Search Jobs Response + +``` +[ + { + "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1", + "user": "HDB_ADMIN", + "type": "csv_url_load", + "status": "COMPLETE", + "start_datetime": 1611613284781, + "end_datetime": 1611613287204, + "job_body": null, + "message": "successfully loaded 350 of 350 records", + "created_datetime": 1611613284764, + "__createdtime__": 1611613284767, + "__updatedtime__": 1611613287207, + "start_datetime_converted": "2021-01-25T22:21:24.781Z", + "end_datetime_converted": "2021-01-25T22:21:27.204Z" + } +] +``` diff --git a/site/versioned_docs/version-4.6/administration/logging/audit-logging.md b/site/versioned_docs/version-4.6/administration/logging/audit-logging.md new file mode 100644 index 00000000..209b4981 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/logging/audit-logging.md @@ -0,0 +1,126 @@ +--- +title: Audit Logging +--- + +# Audit Logging + +### Audit log + +The audit log uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. + +The audit log is enabled by default. To disable the audit log, set `logging.auditLog` to false in the config file, `harperdb-config.yaml`. Then restart Harper for those changes to take effect. Note, the audit log is required to be enabled for real-time messaging.
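+ +As a minimal sketch, that setting looks like this in `harperdb-config.yaml` (see the [configuration docs](../../deployments/configuration) for the full set of logging options): + +```yaml +logging: + auditLog: false +```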
+ +### Audit Log Operations + +#### read_audit_log + +The `read_audit_log` operation is flexible, enabling users to query with many parameters. All operations search on a single table. Filter options include timestamps, usernames, and table hash values. Additional examples can be found in the [Harper API documentation](../../developers/operations-api/logs). + +**Search by Timestamp** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [1660585740558] +} +``` + +There are three possible outcomes when searching by timestamp: + +- `"search_values": []` - All records returned for the specified table +- `"search_values": [1660585740558]` - All records after the provided timestamp +- `"search_values": [1660585740558, 1760585759710]` - All records between the provided "from" and "to" timestamps + +--- + +**Search by Username** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": ["admin"] +} +``` + +The above example will return all records whose `username` is "admin." + +--- + +**Search by Primary Key** + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [318] +} +``` + +The above example will return all records whose primary key (`hash_value`) is 318. + +--- + +#### read_audit_log Response + +The example that follows provides records of operations performed on a table. One thing of note is that the `read_audit_log` operation also gives you the `original_records`, the state of each record before the operation. + +```json +{ + "operation": "update", + "user_name": "HDB_ADMIN", + "timestamp": 1607035559122.277, + "hash_values": [1, 2], + "records": [ + { + "id": 1, + "breed": "Muttzilla", + "age": 6, + "__updatedtime__": 1607035559122 + }, + { + "id": 2, + "age": 7, + "__updatedtime__": 1607035559121 + } + ], + "original_records": [ + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 2, + "name": "Penny" + }, + { + "__createdtime__": 1607035556801, + "__updatedtime__": 1607035556801, + "age": 5, + "breed": "Mutt", + "id": 1, + "name": "Harper" + } + ] +} +``` + +#### delete_audit_logs_before + +Just like with transaction logs, you can clean up your audit logs with the `delete_audit_logs_before` operation. It will delete audit log data according to the given parameters. The example below will delete records older than the timestamp provided. + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "cat", + "timestamp": 1598290282817 +} +``` diff --git a/site/versioned_docs/version-4.6/administration/logging/index.md b/site/versioned_docs/version-4.6/administration/logging/index.md new file mode 100644 index 00000000..e04b8adb --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/logging/index.md @@ -0,0 +1,11 @@ +--- +title: Logging +--- + +# Logging + +Harper provides many different logging options for various features and functionality. + +- [Standard Logging](./standard-logging): Harper maintains a log of events that take place throughout operation. +- [Audit Logging](./audit-logging): Harper uses a standard Harper table to track transactions. For each table a user creates, a corresponding table will be created to track transactions against that table. +- [Transaction Logging](./transaction-logging): Harper stores a verbose history of all transactions logged for specified database tables, including original data records.
diff --git a/site/versioned_docs/version-4.6/administration/logging/standard-logging.md b/site/versioned_docs/version-4.6/administration/logging/standard-logging.md new file mode 100644 index 00000000..56711178 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/logging/standard-logging.md @@ -0,0 +1,65 @@ +--- +title: Standard Logging +--- + +# Standard Logging + +Harper maintains a log of events that take place throughout operation. Log messages can be used for diagnostics purposes as well as monitoring. + +All logs (except for the install log) are stored in the main log file in the hdb directory `<ROOTPATH>/log/hdb.log`. The install log is located in the Harper application directory, most likely located in your npm directory `npm/harperdb/logs`. + +Each log message has several key components for consistent reporting of events. A log message has a format of: + +``` +<timestamp> [<level>] [<thread>/<id>] ...[<tag>]: <message> +``` + +For example, a typical log entry looks like: + +``` +2023-03-09T14:25:05.269Z [notify] [main/0]: HarperDB successfully started. +``` + +The components of a log entry are: + +- timestamp - This is the date/time stamp when the event occurred. +- level - This is an associated log level that gives a rough guide to the importance and urgency of the message. The available log levels, in order from least urgent (and most verbose), are: `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. +- thread/ID - This reports the name of the thread and the thread ID that the event was reported on. Note that NATS logs are recorded by their process name and there is no thread id for them since they are a separate process. Key threads are: + - main - This is the thread that is responsible for managing all other threads and routes incoming requests to the other threads. + - http - These are the worker threads that handle the primary workload of incoming HTTP requests to the operations API and custom functions. + - Clustering\* - These are threads and processes that handle replication. + - job - These are job threads that have been started to handle operations that are executed in a separate job thread. +- tags - Logging from a custom function will include a "custom-function" tag in the log entry. Most logs will not have any additional tags. +- message - This is the main message that was reported. + +We try to keep logging to a minimum by default; to do this, the default log level is `error`. If you require more information from the logs, lowering the log level to a more verbose setting will provide that. + +The log level can be changed by modifying `logging.level` in the config file `harperdb-config.yaml`. + +## Clustering Logging + +Harper clustering utilizes two [NATS](https://nats.io/) servers, named Hub and Leaf. The Hub server is responsible for establishing the mesh network that connects instances of Harper, and the Leaf server is responsible for managing the message stores (streams) that replicate and store messages between instances. Due to the verbosity of these servers, there is a separate log level configuration for them. To adjust their log verbosity, set `clustering.logLevel` in the config file `harperdb-config.yaml`. Valid log levels from least verbose are `error`, `warn`, `info`, `debug` and `trace`. + +## Log File vs Standard Streams + +Harper logs can optionally be streamed to standard streams. Logging to standard streams (stdout/stderr) is primarily used for container logging drivers. For more traditional installations, we recommend logging to a file.
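+ +Logging to both standard streams and to a file can be enabled simultaneously; a minimal sketch of that combination in `harperdb-config.yaml` (assuming defaults elsewhere) is: + +```yaml +logging: + file: true + stdStreams: true +```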
To log to standard streams effectively, make sure to run `harperdb` directly rather than starting it as a separate process (don't use `harperdb start`), and `logging.stdStreams` must be set to true. Note, logging to standard streams only will disable clustering catchup. + +## Logging Rotation + +Log rotation allows for managing log files, such as compressing rotated log files, archiving old log files, determining when to rotate, and the like. This will allow for organized storage and efficient use of disk space. For more information, see “logging” in our [config docs](../../deployments/configuration). + +## Read Logs via the API + +To access specific logs, you may query the Harper API. Logs can be queried using the `read_log` operation. `read_log` returns outputs from the log based on the provided search criteria. + +```json +{ + "operation": "read_log", + "start": 0, + "limit": 1000, + "level": "error", + "from": "2021-01-25T22:05:27.464+0000", + "until": "2021-01-25T23:05:27.464+0000", + "order": "desc" +} +``` diff --git a/site/versioned_docs/version-4.6/administration/logging/transaction-logging.md b/site/versioned_docs/version-4.6/administration/logging/transaction-logging.md new file mode 100644 index 00000000..9003ff04 --- /dev/null +++ b/site/versioned_docs/version-4.6/administration/logging/transaction-logging.md @@ -0,0 +1,87 @@ +--- +title: Transaction Logging +--- + +# Transaction Logging + +Harper offers two options for logging transactions executed against a table. The options are similar but utilize different storage layers. + +## Transaction log + +The first option is `read_transaction_log`. The transaction log is built upon clustering streams. Clustering streams are per-table message stores that enable data to be propagated across a cluster. Harper leverages streams for use with the transaction log. When clustering is enabled, all transactions that occur against a table are pushed to its stream, and thus make up the transaction log. + +If you would like to use the transaction log, but have not set up clustering yet, please see ["How to Cluster"](../../developers/clustering/). + +## Transaction Log Operations + +### read_transaction_log + +The `read_transaction_log` operation returns a prescribed set of records, based on given parameters. The example below will give a maximum of 2 records within the timestamps provided. + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1598290235769, + "to": 1660249020865, + "limit": 2 +} +``` + +_See example response below._ + +### read_transaction_log Response + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +_See example request above._ + +### delete_transaction_logs_before + +The `delete_transaction_logs_before` operation will delete transaction log data according to the given parameters. The example below will delete records older than the timestamp provided.
+ +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +_Note: Streams are used for catchup if a node goes down. If you delete messages from a stream there is a chance catchup won't work._ + +Read on for `read_audit_log`, the second option, for logging transactions executed against a table. diff --git a/site/versioned_docs/version-4.6/deployments/_category_.json b/site/versioned_docs/version-4.6/deployments/_category_.json new file mode 100644 index 00000000..8fdd6e17 --- /dev/null +++ b/site/versioned_docs/version-4.6/deployments/_category_.json @@ -0,0 +1,12 @@ +{ + "label": "Deployments", + "position": 3, + "link": { + "type": "generated-index", + "title": "Deployments Documentation", + "description": "Installation and deployment guides for HarperDB", + "keywords": [ + "deployments" + ] + } +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/deployments/configuration.md b/site/versioned_docs/version-4.6/deployments/configuration.md new file mode 100644 index 00000000..32a3585f --- /dev/null +++ b/site/versioned_docs/version-4.6/deployments/configuration.md @@ -0,0 +1,1204 @@ +--- +title: Configuration File +--- + +# Configuration File + +Harper is configured through a [YAML](https:/yaml.org/) file called `harperdb-config.yaml` located in the Harper root directory (by default this is a directory named `hdb` located in the home directory of the current user). + +Some configuration will be populated by default in the config file on install, regardless of whether it is used. + +--- + +## Using the Configuration File and Naming Conventions + +The configuration elements in `harperdb-config.yaml` use camelcase, such as `operationsApi`. + +To change a configuration value, edit the `harperdb-config.yaml` file and save any changes. **HarperDB must be restarted for changes to take effect.** + +Alternatively, all configuration values can also be modified using environment variables, command line arguments, or the operations API via the [`set_configuration` operation](../developers/operations-api/configuration#set-configuration). + +For nested configuration elements, use underscores to represent parent-child relationships. When accessed this way, elements are case-insensitive. + +For example, to disable logging rotation in the `logging` section: + +```yaml +logging: + rotation: + enabled: false +``` + +You could apply this change using: + +- Environment variable: `LOGGING_ROTATION_ENABLED=false` +- Command line variable: `--LOGGING_ROTATION_ENABLED false` +- Operations API (`set_configuration`): `logging_rotation_enabled: false` + +To change the `port` in the `http` section, use: + +- Environment variable: `HTTP_PORT=` +- Command line variable: `--HTTP_PORT ` +- Operations API (`set_configuration`): `http_port: ` + +To set the `operationsApi.network.port` to `9925`, use: + +- Environment variable: `OPERATIONSAPI_NETWORK_PORT=9925` +- Command line variable: `--OPERATIONSAPI_NETWORK_PORT 9925` +- Operations API (`set_configuration`): `operationsApi_network_port: 9925` + +_Note: Component configuration cannot be added or updated via CLI or ENV variables._ + +## Importing installation configuration + +To use a custom configuration file to set values on install, use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your custom configuration file. 
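+ +For example, a hypothetical invocation might look like `HDB_CONFIG=/path/to/custom-config.yaml harperdb install`, where the path is purely illustrative and the `install` CLI command is an assumption based on a typical npm-based setup.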
+ +To install Harper on top of an existing configuration file, set `HDB_CONFIG` to the root path of your install, `<ROOTPATH>/harperdb-config.yaml`. + +--- + +## Configuration Options + +### `http` + +`sessionAffinity` - _Type_: string; _Default_: null + +Harper is a multi-threaded server designed to scale to utilize many CPU cores with high concurrency. Session affinity can help improve the efficiency and fairness of thread utilization by routing multiple requests from the same client to the same thread. This provides a fairer method of request handling by keeping a single user contained to a single thread, can improve caching locality (multiple requests from a single user are more likely to access the same data), and can provide the ability to share information in-memory in user sessions. Enabling session affinity will cause subsequent requests from the same client to be routed to the same thread. + +To enable `sessionAffinity`, you need to specify how clients will be identified from the incoming requests. If you are using Harper to directly serve HTTP requests from users from different remote addresses, you can use a setting of `ip`. However, if you are using Harper behind a proxy server or application server, all the remote ip addresses will be the same and Harper will effectively only run on a single thread. Alternately, you can specify a header to use for identification. If you are using basic authentication, you could use the "Authorization" header to route requests to threads by the user's credentials. If you have another header that uniquely identifies users/clients, you can use that as the value of sessionAffinity. But be careful to ensure that the value does provide sufficient uniqueness and that requests are effectively distributed to all the threads and fully utilizing all your CPU cores. + +```yaml +http: + sessionAffinity: ip +``` + +`compressionThreshold` - _Type_: number; _Default_: 1200 (bytes) + +For HTTP clients that support (Brotli) compression encoding, responses that are larger than this threshold will be compressed (also note that for clients that accept compression, any streaming responses from queries are compressed as well, since the size is not known beforehand). + +```yaml +http: + compressionThreshold: 1200 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of allowable domains for CORS. + +`corsAccessControlAllowHeaders` - _Type_: string; _Default_: 'Accept, Content-Type, Authorization' + +A string representation of a comma-separated list of header keys for the [Access-Control-Allow-Headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers) header for OPTIONS requests. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limit the amount of time the parser will wait to receive the complete HTTP headers. + +`maxHeaderSize` - _Type_: integer; _Default_: 16394 + +The maximum allowed size of HTTP headers in bytes. + +`keepAliveTimeout` - _Type_: integer; _Default_: 30,000 milliseconds (30 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9926
+ +`securePort` - _Type_: integer; _Default_: null + +The port the Harper component server uses for HTTPS connections. This requires a valid certificate and key. + +`http2` - _Type_: boolean; _Default_: false + +Enables HTTP/2 for the HTTP server. + +`timeout` - _Type_: integer; _Default_: Defaults to 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout. + +```yaml +http: + cors: true + corsAccessList: + - null + headersTimeout: 60000 + maxHeaderSize: 8192 + https: false + keepAliveTimeout: 30000 + port: 9926 + securePort: null + timeout: 120000 +``` + +`mlts` - _Type_: boolean | object; _Default_: false + +This can be configured to enable mTLS based authentication for incoming connections. If enabled with default options (by setting to `true`), the client certificate will be checked against the certificate authority specified with `tls.certificateAuthority`. And if the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default. + +You can also define specific mTLS options by specifying an object for mtls with the following (optional) properties which may be included: + +`user` - _Type_: string; _Default_: Common Name + +This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication. + +`required` - _Type_: boolean; _Default_: false + +This can be enabled to require client certificates (mTLS) for all incoming MQTT connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication. + +```yaml +http: + mtls: true +``` + +or + +```yaml +http: + mtls: + required: true + user: user-name +``` + +--- + +### `threads` + +The `threads` provides control over how many threads, how much heap memory they may use, and debugging of the threads: + +`count` - _Type_: number; _Default_: One less than the number of logical cores/processors + +The `threads.count` option specifies the number of threads that will be used to service the HTTP requests for the operations API and custom functions. Generally, this should be close to the number of CPU logical cores/processors to ensure the CPU is fully utilized (a little less because Harper does have other threads at work), assuming Harper is the main service on a server. + +```yaml +threads: + count: 11 +``` + +`debug` - _Type_: boolean | object; _Default_: false + +This enables debugging. If simply set to true, this will enable debugging on the main thread on port 9229 with the 127.0.0.1 host interface. This can also be an object for more debugging control. + +`debug.port` - The port to use for debugging the main thread `debug.startingPort` - This will set up a separate port for debugging each thread. This is necessary for debugging individual threads with devtools. 
`debug.host` - Specify the host interface to listen on. +`debug.waitForDebugger` - Wait for the debugger before starting. + +```yaml +threads: + debug: + port: 9249 +``` + +`maxHeapMemory` - _Type_: number; + +```yaml +threads: + maxHeapMemory: 300 +``` + +This specifies the heap memory limit for each thread, in megabytes. The default heap limit is a heuristic based on available memory and thread count. + +--- + +### `replication` + +The `replication` section configures [Harper replication](../developers/replication/), which is used to create Harper clusters and replicate data between the instances. + +```yaml +replication: + hostname: server-one + url: wss://server-one:9925 + databases: '*' + routes: + - wss://server-two:9925 + port: null + securePort: 9933 + enableRootCAs: true +``` + +`hostname` - _Type_: string; + +The hostname of the current Harper instance. + +`url` - _Type_: string; + +The URL of the current Harper instance. + +`databases` - _Type_: string/array; _Default_: "\*" (all databases) + +Configure which databases to replicate. This can be a string for all databases or an array for specific databases. + +```yaml +replication: + databases: + - db1 + - db2 +``` + +`routes` - _Type_: array; + +An array of routes to connect to other nodes. Each element in the array can be either a string or an object with `hostname`, `port` and optionally `startTime` properties. + +`startTime` - _Type_: string; ISO formatted UTC date string. + +Replication will attempt to catch up on all remote data upon setup. To start replication from a specific date, set this property. + +`revokedCertificates` - _Type_: array; + +An array of serial numbers of revoked certificates. If a connection is attempted with a certificate that is in this list, the connection will be rejected. + +```yaml +replication: + copyTablesToCatchUp: true + hostname: server-one + routes: + - wss://server-two:9925 # URL based route + - hostname: server-three # define a hostname and port + port: 9930 + startTime: 2024-02-06T15:30:00Z + revokedCertificates: + - 1769F7D6A + - QA69C7E2S +``` + +`port` - _Type_: integer; + +The port to use for replication connections. + +`securePort` - _Type_: integer; _Default_: 9933 + +The port to use for secure replication connections. + +`enableRootCAs` - _Type_: boolean; _Default_: true + +When true, Harper will verify certificates against the Node.js bundled CA store. The bundled CA store is a snapshot of the Mozilla CA store that is fixed at release time. + +`copyTablesToCatchUp` - _Type_: boolean; _Default_: true + +Replication will first attempt to catch up using the audit log. If unsuccessful, it will perform a full table copy. When set to `false`, replication will only use the audit log. + +`shard` - _Type_: integer; + +This defines the shard id of this instance and is used in conjunction with the [Table Resource functions](../developers/replication/sharding#custom-sharding) `setResidency` & `setResidencyById` to programmatically route traffic to the proper shard. + +--- + +### `clustering` using NATS + +The `clustering` section configures the NATS clustering engine; this is used to replicate data between instances of Harper. + +_Note: There exist two ways to create clusters and replicate data in Harper. One option is to use native Harper replication over WebSockets.
The other option is to use_ [_NATS_](https://nats.io/about/) _to facilitate the cluster._ + +Clustering offers a lot of different configurations; however, in a majority of cases the only options you will need to pay attention to are: + +- `clustering.enabled` Enable the clustering processes. +- `clustering.hubServer.cluster.network.port` The port other nodes will connect to. This port must be accessible from other cluster nodes. +- `clustering.hubServer.cluster.network.routes` The connections to other instances. +- `clustering.nodeName` The name of your node; it must be unique within the cluster. +- `clustering.user` The name of the user credentials used for inter-node authentication. + +`enabled` - _Type_: boolean; _Default_: false + +Enable clustering. + +_Note: If you enable clustering but do not create and add a cluster user, you will get a validation error. See the `user` description below on how to add a cluster user._ + +```yaml +clustering: + enabled: true +``` + +`clustering.hubServer.cluster` + +Clustering’s `hubServer` facilitates the Harper mesh network and discovery service. + +```yaml +clustering: + hubServer: + cluster: + name: harperdb + network: + port: 9932 + routes: + - host: 3.62.184.22 + port: 9932 + - host: 3.735.184.8 + port: 9932 +``` + +`name` - _Type_: string, _Default_: harperdb + +The name of your cluster. This name needs to be consistent for all other nodes intended to be meshed in the same network. + +`port` - _Type_: integer, _Default_: 9932 + +The port the hub server uses to accept cluster connections. + +`routes` - _Type_: array, _Default_: null + +An array of objects that represent the hosts and ports this server will cluster to. Each object must have two properties, `port` and `host`. Multiple entries can be added to create network resiliency in the event one server is unavailable. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance. + +`clustering.hubServer.leafNodes` + +```yaml +clustering: + hubServer: + leafNodes: + network: + port: 9931 +``` + +`port` - _Type_: integer; _Default_: 9931 + +The port the hub server uses to accept leaf server connections. + +`clustering.hubServer.network` + +```yaml +clustering: + hubServer: + network: + port: 9930 +``` + +`port` - _Type_: integer; _Default_: 9930 + +Use this port to connect a client to the hub server, for example using the NATS SDK to interact with the server. + +`clustering.leafServer` + +Manages streams; streams are ‘message stores’ that store table transactions. + +```yaml +clustering: + leafServer: + network: + port: 9940 + routes: + - host: 3.62.184.22 + port: 9931 + - host: node3.example.com + port: 9931 + streams: + maxAge: 3600 + maxBytes: 10000000 + maxMsgs: 500 + path: /user/hdb/clustering/leaf +``` + +`port` - _Type_: integer; _Default_: 9940 + +Use this port to connect a client to the leaf server, for example using the NATS SDK to interact with the server. + +`routes` - _Type_: array; _Default_: null + +An array of objects that represent the hosts and ports the leaf node will directly connect with. Each object must have two properties, `port` and `host`.
Unlike the hub server, the leaf server will establish connections to all listed hosts. Routes can be added, updated and removed either by directly editing the `harperdb-config.yaml` file or by using the `cluster_set_routes` or `cluster_delete_routes` API endpoints. + +`host` - _Type_: string + +The host of the remote instance you are creating the connection with. + +`port` - _Type_: integer + +The port of the remote instance you are creating the connection with. This is likely going to be the `clustering.hubServer.cluster.network.port` on the remote instance. + +`clustering.leafServer.streams` + +`maxAge` - _Type_: integer; _Default_: null + +The maximum age of any messages in the stream, expressed in seconds. + +`maxBytes` - _Type_: integer; _Default_: null + +The maximum size of the stream in bytes. Oldest messages are removed if the stream exceeds this size. + +`maxMsgs` - _Type_: integer; _Default_: null + +How many messages may be in a stream. Oldest messages are removed if the stream exceeds this number. + +`path` - _Type_: string; _Default_: <ROOTPATH>/clustering/leaf + +The directory where all the streams are kept. + +```yaml +clustering: + leafServer: + streams: + maxConsumeMsgs: 100 + maxIngestThreads: 2 +``` + +`maxConsumeMsgs` - _Type_: integer; _Default_: 100 + +The maximum number of messages a consumer can process in one go. + +`maxIngestThreads` - _Type_: integer; _Default_: 2 + +The number of Harper threads that are delegated to ingesting messages. + +--- + +`logLevel` - _Type_: string; _Default_: error + +Control the verbosity of clustering logs. + +```yaml +clustering: + logLevel: error +``` + +The log level hierarchy, in order, is `trace`, `debug`, `info`, `warn`, and `error`. When the level is set to `trace`, logs will be created for all possible levels, whereas if the level is set to `warn`, the only entries logged will be `warn` and `error`. The default value is `error`. + +`nodeName` - _Type_: string; _Default_: null + +The name of this node in your Harper cluster topology. This must be a value unique from the rest of the cluster node names. + +_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._ + +```yaml +clustering: + nodeName: great_node +``` + +`tls` + +Transport Layer Security default values are automatically generated on install. + +```yaml +clustering: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem + insecure: true + verify: true +``` + +`certificate` - _Type_: string; _Default_: <ROOTPATH>/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: <ROOTPATH>/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: <ROOTPATH>/keys/privateKey.pem + +Path to the private key file. + +`insecure` - _Type_: boolean; _Default_: true + +When true, will skip certificate verification. For use only with self-signed certs. + +`republishMessages` - _Type_: boolean; _Default_: false + +When true, all transactions that are received from other nodes are republished to this node's stream. When subscriptions are not fully connected between all nodes, this ensures that messages are routed to all nodes through intermediate nodes. This also ensures that all writes, whether local or remote, are written to the NATS transaction log. However, there is additional overhead with republishing, and setting this to false can provide better data replication performance. When false, you need to ensure all subscriptions are fully connected between every node and every other node, and be aware that the NATS transaction log will only consist of local writes.
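+ +As a minimal sketch, disabling republishing would look like this in `harperdb-config.yaml`: + +```yaml +clustering: + republishMessages: false +```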
+ +`verify` - _Type_: boolean; _Default_: true + +When true, the hub server will verify client certificates using the CA certificate. + +--- + +`user` - _Type_: string; _Default_: null + +The username given to the `cluster_user`. All instances in a cluster must use the same clustering user credentials (matching username and password). + +Inter-node authentication takes place via a special Harper user role type called `cluster_user`. + +The user can be created either through the API using an `add_user` request with the role set to `cluster_user`, or on install using environment variables `CLUSTERING_USER=cluster_person` `CLUSTERING_PASSWORD=pass123!` or CLI variables `harperdb --CLUSTERING_USER cluster_person --CLUSTERING_PASSWORD pass123!`. + +```yaml +clustering: + user: cluster_person +``` + +--- + +### `localStudio` + +The `localStudio` section configures the local Harper Studio, a GUI for Harper hosted on the server. A hosted version of the Harper Studio with licensing and provisioning options is available at https://studio.harperdb.io. Note, all database traffic from either `localStudio` or Harper Studio is made directly from your browser to the instance. + +`enabled` - _Type_: boolean; _Default_: false + +Enables the local Studio. + +```yaml +localStudio: + enabled: false +``` + +--- + +### `logging` + +The `logging` section configures Harper logging across all Harper functionality. This includes standard text logging of application and database events as well as structured data logs of record changes. Application/database events are logged in text format to the `~/hdb/log/hdb.log` file (or the location specified by `logging.root` or `logging.path`). Many of the logging configuration properties can be set and applied without a restart (they are applied dynamically). + +In addition, structured logging of data changes is also available: + +`auditLog` - _Type_: boolean; _Default_: false + +Enables table transaction logging. + +```yaml +logging: + auditLog: false +``` + +To access the audit logs, use the API operation `read_audit_log`. It will provide a history of the data, including original records and changes made, in a specified table. + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +`file` - _Type_: boolean; _Default_: true + +Defines whether to log to a file. + +```yaml +logging: + file: true +``` + +`auditRetention` - _Type_: string|number; _Default_: 3d + +This specifies how long audit logs should be retained. + +`level` - _Type_: string; _Default_: error + +Control the verbosity of text event logs. + +```yaml +logging: + level: warn +``` + +The log level hierarchy, in order, is `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify`. When the level is set to `trace`, logs will be created for all possible levels, whereas if the level is set to `fatal`, the only entries logged will be `fatal` and `notify`. The default value is `error`. + +`console` - _Type_: boolean; _Default_: true + +Controls whether console.log and other console.\* calls (as well as any other JS components that write to `process.stdout` and `process.stderr`) are logged to the log file.
By default, these are not logged to the log file, but this can be enabled: + +```yaml +logging: + console: true +``` + +`root` - _Type_: string; _Default_: <ROOTPATH>/log + +The directory path where the log files will be written. + +```yaml +logging: + root: ~/hdb/log +``` + +`path` - _Type_: string; _Default_: <ROOTPATH>/log/hdb.log + +The file path where the log file will be written. + +```yaml +logging: + path: ~/hdb/log/hdb.log +``` + +`rotation` + +Rotation provides the ability for a user to systematically rotate and archive the `hdb.log` file. To enable rotation, `interval` and/or `maxSize` must be set. + +_**Note:**_ `interval` and `maxSize` are approximate values only. It is possible that the log file will exceed these values slightly before it is rotated. + +```yaml +logging: + rotation: + enabled: true + compress: false + interval: 1D + maxSize: 100K + path: /user/hdb/log +``` + +`enabled` - _Type_: boolean; _Default_: true + +Enables logging rotation. + +`compress` - _Type_: boolean; _Default_: false + +Enables compression via gzip when logs are rotated. + +`interval` - _Type_: string; _Default_: null + +The time that should elapse between rotations. Acceptable units are D(ays), H(ours) or M(inutes). + +`maxSize` - _Type_: string; _Default_: null + +The maximum size the log file can reach before it is rotated. Must use units M(egabyte), G(igabyte), or K(ilobyte). + +`path` - _Type_: string; _Default_: <ROOTPATH>/log + +Where to store the rotated log file. File naming convention is `HDB-YYYY-MM-DDT-HH-MM-SSSZ.log`. + +`stdStreams` - _Type_: boolean; _Default_: false + +Log Harper logs to the standard output and error streams. + +```yaml +logging: + stdStreams: false +``` + +`auditAuthEvents` + +`logFailed` - _Type_: boolean; _Default_: false + +Log all failed authentication events. + +_Example:_ `[error] [auth-event]: {"username":"admin","status":"failure","type":"authentication","originating_ip":"127.0.0.1","request_method":"POST","path":"/","auth_strategy":"Basic"}` + +`logSuccessful` - _Type_: boolean; _Default_: false + +Log all successful authentication events. + +_Example:_ `[notify] [auth-event]: {"username":"admin","status":"success","type":"authentication","originating_ip":"127.0.0.1","request_method":"POST","path":"/","auth_strategy":"Basic"}` + +```yaml +logging: + auditAuthEvents: + logFailed: false + logSuccessful: false +``` + +## Defining Separate Logging Configurations + +Harper's logger supports defining multiple logging configurations for different components in the system. Each logging configuration can be assigned its own `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. All logging defaults to the configuration of the "main" logger as configured above, but when logging is configured for different loggers, they will use their own configuration. Separate loggers can be defined: + +`logging.external` + +The `logging.external` section can be used to define logging for all external components that use the [`logger` API](../technical-details/reference/globals). For example: +```yaml +logging: + external: + level: warn + path: ~/hdb/log/apps.log +``` + +`http.logging` + +This section defines log configuration for HTTP logging. By default, HTTP requests are not logged, but defining this section will enable HTTP logging. Note that there can be substantive overhead to logging all HTTP requests.
In addition to the standard logging configuration, the `http.logging` section also allows the following configuration properties to be set: +* `timing` - This will log timing information +* `headers` - This will log the headers in each request (which can be very verbose) +* `id` - This will assign a unique id to each request and log it in the entry for each request. This is assigned as the `request.requestId` property and can be used by other logging to track a request. +Note that the `level` will determine which HTTP requests are logged: +* `info` (or more verbose) - All HTTP requests +* `warn` - HTTP requests with a status code of 400 or above +* `error` - HTTP requests with a status code of 500 or above + +For example: +```yaml +http: + logging: + timing: true + level: info + path: ~/hdb/log/http.log + ... rest of http config +``` + +`authentication.logging` + +This section defines log configuration for authentication. This takes the standard logging configuration options of `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. + +`mqtt.logging` + +This section defines log configuration for MQTT. This takes the standard logging configuration options of `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. + +`replication.logging` + +This section defines log configuration for replication. This takes the standard logging configuration options of `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. + +`tls.logging` + +This section defines log configuration for TLS. This takes the standard logging configuration options of `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. + +`storage.logging` + +This section defines log configuration for setting up and reading the database files. This takes the standard logging configuration options of `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. + +`analytics.logging` + +This section defines log configuration for analytics. This takes the standard logging configuration options of `path` (or `root`), `level`, `tag`, and flag to enable/disable logging to `stdStreams`. + +--- + +### `authentication` + +The authentication section defines the configuration for the default authentication mechanism in Harper. + +```yaml +authentication: + authorizeLocal: true + cacheTTL: 30000 + enableSessions: true + operationTokenTimeout: 1d + refreshTokenTimeout: 30d +``` + +`authorizeLocal` - _Type_: boolean; _Default_: true + +This will automatically authorize any requests from the loopback IP address as the superuser. This should be disabled for any Harper servers that may be accessed by untrusted users from the same instance. For example, this should be disabled if you are using a local proxy, or for general server hardening. + +`cacheTTL` - _Type_: number; _Default_: 30000 + +This defines the length of time (in milliseconds) that an authentication (a particular Authorization header or token) can be cached. + +`enableSessions` - _Type_: boolean; _Default_: true + +This will enable cookie-based sessions to maintain an authenticated session.
This is generally the preferred mechanism for maintaining authentication in web browsers as it allows cookies to hold an authentication token securely without giving JavaScript code access to token/credentials that may open up XSS vulnerabilities. + +`operationTokenTimeout` - _Type_: string; _Default_: 1d + +Defines the length of time an operation token will be valid until it expires. Example values: https://github.com/vercel/ms. + +`refreshTokenTimeout` - _Type_: string; _Default_: 30d + +Defines the length of time a refresh token will be valid until it expires. Example values: https://github.com/vercel/ms. + +### `operationsApi` + +The `operationsApi` section configures the Harper Operations API.\ +All the `operationsApi` configuration is optional. Any configuration that is not provided under this section will default to the `http` configuration section. + +`network` + +```yaml +operationsApi: + network: + cors: true + corsAccessList: + - null + domainSocket: /user/hdb/operations-server + headersTimeout: 60000 + keepAliveTimeout: 5000 + port: 9925 + securePort: null + timeout: 120000 +``` + +`cors` - _Type_: boolean; _Default_: true + +Enable Cross Origin Resource Sharing, which allows requests across a domain. + +`corsAccessList` - _Type_: array; _Default_: null + +An array of allowable domains for CORS. + +`domainSocket` - _Type_: string; _Default_: <ROOTPATH>/operations-server + +The path to the Unix domain socket used to provide the Operations API through the CLI. + +`headersTimeout` - _Type_: integer; _Default_: 60,000 milliseconds (1 minute) + +Limit the amount of time the parser will wait to receive the complete HTTP headers. + +`keepAliveTimeout` - _Type_: integer; _Default_: 5,000 milliseconds (5 seconds) + +Sets the number of milliseconds of inactivity the server needs to wait for additional incoming data after it has finished processing the last response. + +`port` - _Type_: integer; _Default_: 9925 + +The port the Harper operations API interface will listen on. + +`securePort` - _Type_: integer; _Default_: null + +The port the Harper operations API uses for HTTPS connections. This requires a valid certificate and key. + +`timeout` - _Type_: integer; _Default_: 120,000 milliseconds (2 minutes) + +The length of time in milliseconds after which a request will timeout. + +`tls` + +This configures the Transport Layer Security for HTTPS support. + +```yaml +operationsApi: + tls: + certificate: ~/hdb/keys/certificate.pem + certificateAuthority: ~/hdb/keys/ca.pem + privateKey: ~/hdb/keys/privateKey.pem +``` + +`certificate` - _Type_: string; _Default_: <ROOTPATH>/keys/certificate.pem + +Path to the certificate file. + +`certificateAuthority` - _Type_: string; _Default_: <ROOTPATH>/keys/ca.pem + +Path to the certificate authority file. + +`privateKey` - _Type_: string; _Default_: <ROOTPATH>/keys/privateKey.pem + +Path to the private key file. + +--- + +### `componentsRoot` + +`componentsRoot` - _Type_: string; _Default_: <ROOTPATH>/components + +The path to the folder containing the local component files. + +```yaml +componentsRoot: ~/hdb/components +``` + +--- + +### `rootPath` + +`rootPath` - _Type_: string; _Default_: home directory of the current user + +The Harper database and applications/API/interface are decoupled from each other. The `rootPath` directory specifies where the Harper application persists data, config, logs, and Custom Functions.

```yaml
rootPath: /Users/jonsnow/hdb
```

---

### `storage`

`writeAsync` - _Type_: boolean; _Default_: false

The `writeAsync` option turns off disk flushing/syncing, allowing for faster write operation throughput. However, this does not provide storage integrity guarantees, and if a server crashes, it is possible that there may be data loss requiring a restore from a backup or another node.

```yaml
storage:
  writeAsync: false
```

`caching` - _Type_: boolean; _Default_: true

The `caching` option enables in-memory caching of records, providing faster access to frequently accessed objects. This can incur some extra overhead for situations where reads are extremely random and don't benefit from caching.

```yaml
storage:
  caching: true
```

`compression` - _Type_: boolean; _Default_: true

The `compression` option enables compression of records in the database. This can be helpful for very large records, reducing storage requirements and potentially allowing more data to be cached. This uses the very fast LZ4 compression algorithm, but compressing and decompressing still incurs extra cost.

```yaml
storage:
  compression: false
```

`compression.dictionary` - _Type_: string; _Default_: null

Path to a compression dictionary file.

`compression.threshold` - _Type_: number; _Default_: `4036`, or `storage.pageSize - 60` if `storage.pageSize` is provided

Only entries that are larger than this value (in bytes) will be compressed.

```yaml
storage:
  compression:
    dictionary: /users/harperdb/dict.txt
    threshold: 1000
```

`compactOnStart` - _Type_: boolean; _Default_: false

When `true`, all non-system databases will be compacted when starting Harper; read more [here](../administration/compact).

`compactOnStartKeepBackup` - _Type_: boolean; _Default_: false

Keep the backups made by `compactOnStart`.

```yaml
storage:
  compactOnStart: true
  compactOnStartKeepBackup: false
```

`maxTransactionQueueTime` - _Type_: time; _Default_: 45s

The `maxTransactionQueueTime` option specifies how long the write queue can get before write requests are rejected (with a 503).

```yaml
storage:
  maxTransactionQueueTime: 2m
```

`noReadAhead` - _Type_: boolean; _Default_: false

The `noReadAhead` option advises the operating system not to read ahead when reading from the database. This provides better memory utilization for databases with small records (less than one page), but can degrade performance in situations where large records are used or range queries are frequent.

```yaml
storage:
  noReadAhead: true
```

`prefetchWrites` - _Type_: boolean; _Default_: true

The `prefetchWrites` option loads data prior to write transactions. This should be enabled for databases that are larger than memory (although it can be faster to disable this for smaller databases).

```yaml
storage:
  prefetchWrites: true
```

`path` - _Type_: string; _Default_: `/database`

The `path` configuration sets where all database files should reside.

```yaml
storage:
  path: /users/harperdb/storage
```

_**Note:**_ This configuration applies to all database files, including the system tables that are used internally by Harper. For this reason, if you wish to use a non-default `path` value, you must move any existing schemas into your `path` location. Existing schemas are likely to include the system schema, which can be found at `/schema/system`.

`blobPaths` - _Type_: string or array of strings; _Default_: `/blobs`

The `blobPaths` configuration sets where all the blob files should reside. This can be an array of paths; if there are multiple, the blobs will be distributed across the paths.

```yaml
storage:
  blobPaths:
    - /users/harperdb/big-storage
```

`pageSize` - _Type_: number; _Default_: the default page size of the OS

Defines the page size of the database.

```yaml
storage:
  pageSize: 4096
```

`reclamation`

The reclamation section provides configuration for the reclamation process, which is responsible for reclaiming space when free space is low. For example:

```yaml
storage:
  reclamation:
    threshold: 0.4 # Start storage reclamation efforts when free space has reached 40% of the volume space (default)
    interval: 1h # Reclamation will run every hour (default)
    evictionFactor: 100000 # A factor used to determine how aggressively to evict cached entries (default)
```

---

### `tls`

This section defines the certificates, keys, and settings for Transport Layer Security (TLS) for HTTPS and TLS socket support. This is used for both the HTTP and MQTT protocols. The `tls` section can be a single object with the settings below, or it can be an array of objects, where each object is a separate TLS configuration. By using an array, the TLS configuration can be used to define multiple certificates for different domains/hosts (negotiated through SNI).

```yaml
tls:
  certificate: ~/hdb/keys/certificate.pem
  certificateAuthority: ~/hdb/keys/ca.pem
  privateKey: ~/hdb/keys/privateKey.pem
```

`certificate` - _Type_: string; _Default_: \/keys/certificate.pem

Path to the certificate file.

`certificateAuthority` - _Type_: string; _Default_: \/keys/ca.pem

Path to the certificate authority file.

`privateKey` - _Type_: string; _Default_: \/keys/privateKey.pem

Path to the private key file.

`ciphers` - _Type_: string

Allows specific ciphers to be set.

If you want to define multiple certificates that are applied based on the domain/host requested via SNI, you can define an array of TLS configurations. Each configuration can have the same properties as the root TLS configuration, but can (optionally) also have an additional `host` property to specify the domain/host that the certificate should be used for:

```yaml
tls:
  - certificate: ~/hdb/keys/certificate1.pem
    certificateAuthority: ~/hdb/keys/ca1.pem
    privateKey: ~/hdb/keys/privateKey1.pem
    host: example.com # the host is optional, and if not provided, this certificate's common name will be used as the host name.
  - certificate: ~/hdb/keys/certificate2.pem
    certificateAuthority: ~/hdb/keys/ca2.pem
    privateKey: ~/hdb/keys/privateKey2.pem
```

Note that a `tls` section can also be defined in the `operationsApi` section, which will override the root `tls` section for the operations API.

---

### `mqtt`

The MQTT protocol can be configured in this section.

```yaml
mqtt:
  network:
    port: 1883
    securePort: 8883
    mtls: false
    webSocket: true
    requireAuthentication: true
```

`port` - _Type_: number; _Default_: 1883

This is the port used to listen for insecure MQTT connections.

`securePort` - _Type_: number; _Default_: 8883

This is the port used to listen for secure MQTT connections. This will use the `tls` configuration for certificates.

`webSocket` - _Type_: boolean; _Default_: true

This enables access to MQTT through WebSockets.
This will handle WebSocket connections on the HTTP port (defaults to 9926) that have specified a (sub)protocol of `mqtt`.

`requireAuthentication` - _Type_: boolean; _Default_: true

This indicates whether authentication should be required for establishing an MQTT connection (whether through MQTT connection credentials or mTLS). Disabling this allows unauthenticated connections, which are then subject to authorization for publishing and subscribing (by default, tables/resources do not authorize such access, but that can be enabled at the resource level).

`mtls` - _Type_: boolean | object; _Default_: false

This can be configured to enable mTLS-based authentication for incoming connections. If enabled with default options (by setting it to `true`), the client certificate will be checked against the certificate authority specified in the `tls` section. If the certificate can be properly verified, the connection will authenticate users where the user's id/username is specified by the `CN` (common name) from the client certificate's `subject`, by default.

You can also define specific mTLS options by specifying an object for `mtls` with the following optional properties:

`user` - _Type_: string; _Default_: Common Name

This configures a specific username to authenticate as for mTLS connections. If a `user` is defined, any authorized mTLS connection (that authorizes against the certificate authority) will be authenticated as this user. This can also be set to `null`, which indicates that no authentication is performed based on the mTLS authorization. When combined with `required: true`, this can be used to enforce that users must have authorized mTLS _and_ provide credential-based authentication.

`required` - _Type_: boolean; _Default_: false

This can be enabled to require client certificates (mTLS) for all incoming MQTT connections. If enabled, any connection that doesn't provide an authorized certificate will be rejected/closed. By default, this is disabled, and authentication can take place with mTLS _or_ standard credential authentication.

`certificateAuthority` - _Type_: string; _Default_: Path from `tls.certificateAuthority`

This can define a specific path to use for the certificate authority. By default, certificate authorization checks against the CA specified at `tls.certificateAuthority`, but if you need a specific/distinct CA for MQTT, you can set this.

For example, you could specify that mTLS is required and will authenticate as "user-name":

```yaml
mqtt:
  network:
    mtls:
      user: user-name
      required: true
```

---

### `databases`

The `databases` section is an optional configuration that can be used to define where database files should reside, down to the table level. This configuration should be set before the database and table have been created. The configuration will not create the directories in the path; that must be done by the user.

To define where a database and all its tables should reside, use the name of your database and the `path` parameter.

```yaml
databases:
  nameOfDatabase:
    path: /path/to/database
```

To define where specific tables within a database should reside, use the name of your database, the `tables` parameter, the name of your table, and the `path` parameter.

```yaml
databases:
  nameOfDatabase:
    tables:
      nameOfTable:
        path: /path/to/table
```

This same pattern can be used to define where the audit log database files should reside.
To do this, use the `auditPath` parameter.

```yaml
databases:
  nameOfDatabase:
    auditPath: /path/to/database
```

**Setting the database section through the command line, environment variables or API**

When using command line variables, environment variables, or the API to configure the databases section, a slightly different convention from the regular one should be used. To add one or more configurations, use a JSON object array.

Using command line variables:

```bash
--DATABASES [{\"nameOfSchema\":{\"tables\":{\"nameOfTable\":{\"path\":\"\/path\/to\/table\"}}}}]
```

Using environment variables:

```bash
DATABASES=[{"nameOfSchema":{"tables":{"nameOfTable":{"path":"/path/to/table"}}}}]
```

Using the API:

```json
{
	"operation": "set_configuration",
	"databases": [
		{
			"nameOfDatabase": {
				"tables": {
					"nameOfTable": {
						"path": "/path/to/table"
					}
				}
			}
		}
	]
}
```

---

### Components

`nameOfComponent` - _Type_: string

The name of the component. This will be used to name the folder where the component is installed and must be unique.

`package` - _Type_: string

A reference to your [component](../technical-details/reference/components/applications#adding-components-to-root) package. This could be a remote git repo, a local folder/file, or an NPM package. Harper will add this package to a package.json file and call `npm install` on it, so any reference that works with that paradigm will work here.

Read more about npm install [here](https://docs.npmjs.com/cli/v8/commands/npm-install).

`port` - _Type_: number; _Default_: the value of `http.port`

The port that your component should listen on. If no port is provided, it will default to `http.port`.

```yaml
nameOfComponent:
  package: 'HarperDB-Add-Ons/package-name'
  port: 4321
```

diff --git a/site/versioned_docs/version-4.6/deployments/harper-cli.md b/site/versioned_docs/version-4.6/deployments/harper-cli.md
new file mode 100644
index 00000000..1a7d5503
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/harper-cli.md
@@ -0,0 +1,194 @@

---
title: Harper CLI
---

# Harper CLI

## Harper CLI

The Harper command line interface (CLI) is used to administer [self-installed Harper instances](./install-harper/).

### Installing Harper

To install Harper with CLI prompts, run the following command:

```bash
harperdb install
```

Alternatively, Harper installations can be automated with environment variables or command line arguments; [see a full list of configuration parameters here](./configuration#using-the-configuration-file-and-naming-conventions). Note, when used in conjunction, command line arguments will override environment variables.

**Environment Variables**

```bash
# minimum required parameters for no additional CLI prompts
export TC_AGREEMENT=yes
export HDB_ADMIN_USERNAME=HDB_ADMIN
export HDB_ADMIN_PASSWORD=password
export ROOTPATH=/tmp/hdb/
export OPERATIONSAPI_NETWORK_PORT=9925
harperdb install
```

**Command Line Arguments**

```bash
# minimum required parameters for no additional CLI prompts
harperdb install --TC_AGREEMENT yes --HDB_ADMIN_USERNAME HDB_ADMIN --HDB_ADMIN_PASSWORD password --ROOTPATH /tmp/hdb/ --OPERATIONSAPI_NETWORK_PORT 9925
```

---

### Starting Harper

To start Harper after it is installed, run the following command:

```bash
harperdb start
```

---

### Stopping Harper

To stop Harper once it is running, run the following command:

```bash
harperdb stop
```

---

### Restarting Harper

To restart Harper once it is running, run the following command:

```bash
harperdb restart
```

---

### Getting the Harper Version

To check the version of Harper that is installed, run the following command:

```bash
harperdb version
```

---

### Renew self-signed certificates

To renew the Harper-generated self-signed certificates, run:

```bash
harperdb renew-certs
```

---

### Copy a database with compaction

To copy a Harper database with compaction (to eliminate free space and fragmentation), use:

```bash
harperdb copy-db
```

For example, to copy the default database:

```bash
harperdb copy-db data /home/user/hdb/database/copy.mdb
```

---

### Get all available CLI commands

To display all available Harper CLI commands along with a brief description, run:

```bash
harperdb help
```

---

### Get the status of Harper and clustering

To display the status of the Harper process, the clustering hub and leaf processes, the clustering network, and replication statuses, run:

```bash
harperdb status
```

---

### Backups

Harper uses a transactional commit process that ensures that data on disk is always transactionally consistent with storage. This means that Harper maintains database integrity in the event of a crash. It also means that you can use any standard volume snapshot tool to make a backup of a Harper database. Database files are stored in the hdb/database directory. As long as the snapshot is an atomic snapshot of these database files, the data can be copied/moved back into the database directory to restore a previous backup (with Harper shut down), and database integrity will be preserved. Note that simply copying an in-use database file (using `cp`, for example) is _not_ a snapshot; this would progressively read data from the database at different points in time, which yields an unreliable copy that likely will not be usable. Standard copying is only reliable for a database file that is not in use.
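
For instance, on a system where the data volume is managed by LVM, an atomic snapshot might be taken like this (a sketch; the volume group and logical volume names are assumptions matching the LVM configuration in the Linux installation guide, and the volume group must have free space for the snapshot):

```bash
# create a point-in-time snapshot of the data volume
sudo lvcreate --snapshot --name hdb_backup --size 10G /dev/hdb_vg/hdb_lv
```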

---

## Operations API through the CLI

Some of the API operations are available through the CLI; this includes most operations that do not require nested parameters. To call an operation, use the convention `harperdb <operation> <parameter>=<value>`. By default, the result will be formatted as YAML; if you would like the result in JSON, pass `json=true`.

Some examples are:

```bash
$ harperdb describe_table database=dev table=dog

schema: dev
name: dog
hash_attribute: id
audit: true
schema_defined: false
attributes:
  - attribute: id
    is_primary_key: true
  - attribute: name
    indexed: true
clustering_stream_name: 3307bb542e0081253klnfd3f1cf551b
record_count: 10
last_updated_record: 1724483231970.9949
```

`harperdb set_configuration logging_level=error`

`harperdb deploy_component project=my-cool-app package=https://github.com/HarperDB/application-template`

`harperdb get_components`

`harperdb search_by_id database=dev table=dog ids='["1"]' get_attributes='["*"]' json=true`

`harperdb search_by_value table=dog search_attribute=name search_value=harper get_attributes='["id", "name"]'`

`harperdb sql sql='select * from dev.dog where id="1"'`

### Remote Operations

The CLI can also be used to run operations on remote Harper instances. To do this, pass the `target` parameter with the HTTP address of the remote instance. You will generally also need to provide credentials, either by specifying the `username` and `password` parameters or by setting the environment variables `CLI_TARGET_USERNAME` and `CLI_TARGET_PASSWORD`, for example:

```bash
export CLI_TARGET_USERNAME=HDB_ADMIN
export CLI_TARGET_PASSWORD=password
harperdb describe_database database=dev target=https://server.com:9925
```

The same set of API operations is available for remote operations as well.

#### Remote Component Deployment

When using remote operations, you can deploy a local component to the remote instance. If you omit the `package` parameter, you can deploy the current directory. This will package the current directory and send it to the target server (`deploy` is also allowed as an alias for `deploy_component`):

```bash
harperdb deploy target=https://server.com:9925
```

If you are interacting with a cluster, you may wish to include the `replicated=true` parameter to ensure that the deployment operation is replicated to all nodes in the cluster. You will also need to restart afterwards to apply the changes (here shown with the replicated parameter):

```bash
harperdb restart target=https://server.com:9925 replicated=true
```

diff --git a/site/versioned_docs/version-4.6/deployments/harper-cloud/alarms.md b/site/versioned_docs/version-4.6/deployments/harper-cloud/alarms.md
new file mode 100644
index 00000000..8b695c37
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/harper-cloud/alarms.md
@@ -0,0 +1,20 @@

---
title: Alarms
---

# Alarms

Harper Cloud instance alarms are triggered when certain conditions are met. Once alarms are triggered, organization owners will immediately receive an email alert, and the alert will be available on the [Instance Configuration](../../administration/harper-studio/instance-configuration) page. The below table describes each alert and its evaluation metrics.

### Heading Definitions

- **Alarm**: Title of the alarm.
- **Threshold**: Definition of the alarm threshold.
- **Intervals**: The number of occurrences before an alarm is triggered and the period that the metric is evaluated over.
- **Proposed Remedy**: Recommended solution to avoid the alert in the future.

| Alarm   | Threshold  | Intervals | Proposed Remedy                                                                                                              |
| ------- | ---------- | --------- | ---------------------------------------------------------------------------------------------------------------------------- |
| Storage | > 90% Disk | 1 x 5min  | [Increase storage volume](../../administration/harper-studio/instance-configuration#update-instance-storage)                  |
| CPU     | > 90% Avg  | 2 x 5min  | [Increase instance size for additional CPUs](../../administration/harper-studio/instance-configuration#update-instance-ram)   |
| Memory  | > 90% RAM  | 2 x 5min  | [Increase instance size](../../administration/harper-studio/instance-configuration#update-instance-ram)                       |

diff --git a/site/versioned_docs/version-4.6/deployments/harper-cloud/index.md b/site/versioned_docs/version-4.6/deployments/harper-cloud/index.md
new file mode 100644
index 00000000..fbf2d81e
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/harper-cloud/index.md
@@ -0,0 +1,9 @@

---
title: Harper Cloud
---

# Harper Cloud

[Harper Cloud](https://studio.harperdb.io/) is the easiest way to test drive Harper: it’s Harper-as-a-Service. Cloud handles deployment and management of your instances in just a few clicks. Harper Cloud is currently powered by AWS, with additional cloud providers on our roadmap for the future.

You can create a new Harper Cloud instance in the Harper Studio.

diff --git a/site/versioned_docs/version-4.6/deployments/harper-cloud/instance-size-hardware-specs.md b/site/versioned_docs/version-4.6/deployments/harper-cloud/instance-size-hardware-specs.md
new file mode 100644
index 00000000..72979d8d
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/harper-cloud/instance-size-hardware-specs.md
@@ -0,0 +1,23 @@

---
title: Instance Size Hardware Specs
---

# Instance Size Hardware Specs

While Harper Cloud bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.

| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor                              |
| --------------------- | --------- | ------- | -------------- | -------------------------------------- |
| t3.micro              | 1         | 2       | Up to 5        | 2.5 GHz Intel Xeon Platinum 8000       |
| t3.small              | 2         | 2       | Up to 5        | 2.5 GHz Intel Xeon Platinum 8000       |
| t3.medium             | 4         | 2       | Up to 5        | 2.5 GHz Intel Xeon Platinum 8000       |
| m5.large              | 8         | 2       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.xlarge             | 16        | 4       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.2xlarge            | 32        | 8       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.4xlarge            | 64        | 16      | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.8xlarge            | 128       | 32      | 10             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.12xlarge           | 192       | 48      | 10             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.16xlarge           | 256       | 64      | 20             | Up to 3.1 GHz Intel Xeon Platinum 8000 |
| m5.24xlarge           | 384       | 96      | 25             | Up to 3.1 GHz Intel Xeon Platinum 8000 |

\*Specifications are subject to change. For the most up to date information, please refer to AWS documentation: [https://aws.amazon.com/ec2/instance-types/](https://aws.amazon.com/ec2/instance-types/).

diff --git a/site/versioned_docs/version-4.6/deployments/harper-cloud/iops-impact.md b/site/versioned_docs/version-4.6/deployments/harper-cloud/iops-impact.md
new file mode 100644
index 00000000..7c2390df
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/harper-cloud/iops-impact.md
@@ -0,0 +1,45 @@

---
title: IOPS Impact on Performance
---

# IOPS Impact on Performance

Harper, like any database, can place a tremendous load on its storage resources. Storage, not CPU or memory, will more often be the bottleneck of a server, virtual machine, or container running Harper. Understanding how storage works, and how much storage performance your workload requires, is key to ensuring that Harper performs as expected.

## IOPS Overview

The primary measure of storage performance is the number of input/output operations per second (IOPS) that a storage device can perform. Different storage devices can have dramatically different performance profiles. A hard drive (HDD) might only perform a hundred or so IOPS, while a solid state drive (SSD) might be able to perform tens or hundreds of thousands of IOPS.

Cloud providers like AWS, which powers Harper Cloud, don’t typically attach individual disks to a virtual machine or container. Instead, they combine large numbers of storage drives to create very high performance storage servers. Chunks (volumes) of that storage are then carved out and presented to many different virtual machines and containers. Due to the shared nature of this type of storage, the cloud provider places configurable limits on the number of IOPS that a volume can perform. The same way that cloud providers charge more for larger capacity volumes, they also charge more for volumes with more IOPS.

## Harper Cloud Storage

Harper Cloud utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp3) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.

AWS EBS gp3 volumes have a baseline performance level of 3,000 IOPS; as a result, all Harper Cloud storage options offer 3,000 IOPS. We plan to offer scalable IOPS as an option in the future.

You can read more about AWS EBS volume IOPS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html.

## Estimating IOPS for a Harper Instance

The number of IOPS required for a particular workload is influenced by many factors. Testing your particular application is the best way to determine the number of IOPS required. A reliable method is to estimate about two IOPS for every index, including the primary key itself. So if a table has two indices besides the primary key, estimate that an insert or update will require about six IOPS. Note that this can often be closer to one IOPS per index under load, due to internal batching of writes, and sometimes even better when doing sequential inserts. Again, it is best to test to verify this with application-specific data and write patterns.

For assistance in estimating IOPS requirements, feel free to contact Harper Support or join our Community Slack Channel.

## Example Use Case IOPS Requirements

- **Sensor Data Collection**

  In the case of IoT sensors where data collection will be sustained, high IOPS are required. While there are not typically large queries going on in this case, there is a high volume of data being ingested. This implies that IOPS will be sustained at a high level.
  For example, if you are collecting 100 records per second, you would expect to need roughly 3,000 IOPS just to handle the data inserts.

- **Data Analytics/BI Server**

  Providing a server for analytics purposes typically requires a larger machine. Typically these cases involve large scale SQL joins and aggregations, which puts a large strain on reads. Harper utilizes an in-memory cache, which provides a significant performance boost on machines with large amounts of memory. However, if disparate datasets are constantly being queried and/or new data is frequently being loaded, you will find that the system still needs high IOPS to meet performance demand.

- **Web Services**

  Typical web service implementations with discrete reads and writes often do not need high IOPS to perform as expected. This is often the case in more transactional systems without the requirement for high performance load. A good rule to follow is that any Harper operation that requires a data scan will be IOPS intensive, but if these are not frequent then the EBS boost will suffice. Queries using equals operations in either SQL or NoSQL do not require a scan, due to Harper’s native indexing.

- **High Performance Database**

  Ultimately, if performance is your top priority, Harper should be run on bare metal hardware. Cloud providers offer these options at a higher cost, but they come with obvious performance improvements.

diff --git a/site/versioned_docs/version-4.6/deployments/harper-cloud/verizon-5g-wavelength-instances.md b/site/versioned_docs/version-4.6/deployments/harper-cloud/verizon-5g-wavelength-instances.md
new file mode 100644
index 00000000..9de7cfa9
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/harper-cloud/verizon-5g-wavelength-instances.md
@@ -0,0 +1,31 @@

---
title: Verizon 5G Wavelength
---

# Verizon 5G Wavelength

These instances are only accessible from the Verizon network. When accessing your Harper instance, please ensure you are connected to the Verizon network; examples include Verizon 5G Internet, Verizon Hotspots, or Verizon mobile devices.

Harper on Verizon 5G Wavelength brings Harper closer to the end user, exclusively on the Verizon network, resulting in as little as single-digit millisecond response time from Harper to the client.

Instances are built via AWS Wavelength. You can read more about [AWS Wavelength here](https://aws.amazon.com/wavelength/).

## Harper 5G Wavelength Instance Specs

While Harper 5G Wavelength bills by RAM, each instance has other specifications associated with the RAM selection. The following table describes each instance size in detail\*.

| AWS EC2 Instance Size | RAM (GiB) | # vCPUs | Network (Gbps) | Processor                                   |
| --------------------- | --------- | ------- | -------------- | ------------------------------------------- |
| t3.medium             | 4         | 2       | Up to 5        | Up to 3.1 GHz Intel Xeon Platinum Processor |
| t3.xlarge             | 16        | 4       | Up to 5        | Up to 3.1 GHz Intel Xeon Platinum Processor |
| r5.2xlarge            | 64        | 8       | Up to 10       | Up to 3.1 GHz Intel Xeon Platinum Processor |

\*Specifications are subject to change. For the most up to date information, please refer to [AWS documentation](https://aws.amazon.com/ec2/instance-types/).

## Harper 5G Wavelength Storage

Harper 5G Wavelength utilizes AWS Elastic Block Storage (EBS) General Purpose SSD (gp2) volumes. This is the most common storage type used in AWS, as it provides reasonable performance for most workloads, at a reasonable price.

AWS EBS gp2 volumes have a baseline performance level, which determines the number of IOPS they can perform indefinitely. The larger the volume, the higher its baseline performance. Additionally, smaller gp2 volumes are able to burst to a higher number of IOPS for periods of time.

Smaller gp2 volumes are perfect for trying out the functionality of Harper, and might also work well for applications that don’t perform many database transactions. For applications that perform a moderate or high number of transactions, we recommend that you use a larger Harper volume. Learn more about the [impact of IOPS on performance here](./iops-impact).

You can read more about [AWS EBS gp2 volume IOPS here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html#ebsvolumetypes_gp2).

diff --git a/site/versioned_docs/version-4.6/deployments/install-harper/index.md b/site/versioned_docs/version-4.6/deployments/install-harper/index.md
new file mode 100644
index 00000000..360f6758
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/install-harper/index.md
@@ -0,0 +1,61 @@

---
title: Install Harper
---

# Install Harper

## Install Harper

This documentation contains information for installing Harper locally. Note that if you’d like to get up and running quickly, you can try a [managed instance with Harper Cloud](https://studio.harperdb.io/sign-up). Harper is a cross-platform database; we recommend Linux for production use, but Harper can run on Windows and Mac as well for development purposes. Installation is usually very simple and takes just a few steps, but there are a few different options documented here.

Harper runs on Node.js, so if you do not have it installed, you need to do that first (if you already have it installed, you can skip to installing Harper itself). Node.js can be downloaded and installed from [their site](https://nodejs.org/). For Linux and Mac, we recommend installing and managing Node versions with [NVM, which has instructions for installation](https://github.com/nvm-sh/nvm). Generally NVM can be installed with the following command:

```bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.5/install.sh | bash
```

Then log out and log back in, and install Node.js using nvm. We recommend using LTS, but we support all currently maintained Node versions (currently version 14 and newer; make sure to always use the latest minor/patch release for the major version):

```bash
nvm install --lts
```

#### Install and Start Harper

Then you can install Harper with NPM and start it:

```bash
npm install -g harperdb
harperdb
```

Harper will automatically start after installation. Harper's installation can be configured with numerous options via CLI arguments; for more information visit the [Harper Command Line Interface](../harper-cli) guide.

If you are setting up a production server on Linux, [we have much more extensive documentation on how to configure volumes for database storage, set up a systemd script, and configure your operating system for use as a database server in our Linux installation guide](./linux).

## With Docker

If you would like to run Harper in Docker, install [Docker Desktop](https://docs.docker.com/desktop/) on your Mac or Windows computer. Otherwise, install the [Docker Engine](https://docs.docker.com/engine/install/) on your Linux server. Once Docker Desktop or Docker Engine is installed, visit our [Docker Hub page](https://hub.docker.com/r/harperdb/harperdb) for information and examples on how to run a Harper container.
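
As a starting point, a container might be run like this (a minimal sketch; the ports and environment variables mirror the install parameters described in this documentation, and the data path inside the container is an assumption — see the Docker Hub page for the authoritative options):

```bash
# run Harper in the background, publishing the operations and application ports
# and persisting data to a host directory (the in-container path is an assumption)
docker run -d \
  -v $(pwd)/hdb:/home/harperdb/hdb \
  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
  -e HDB_ADMIN_PASSWORD=password \
  -p 9925:9925 -p 9926:9926 \
  harperdb/harperdb
```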

## Offline Install

If you need to install Harper on a device that doesn't have an Internet connection, you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and NPM):

[Download Install Package](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html)

Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it:

```bash
npm install -g harperdb-X.X.X.tgz
harperdb install
```

## Installation on Less Common Platforms

Harper comes with binaries for standard AMD64/x64 or ARM64 CPU architectures on Linux, Windows (x64 only), and Mac (including Apple Silicon). However, if you are installing on a less common platform (Alpine, for example), you will need to ensure that you have build tools installed for the installation process to compile the binaries (this is handled automatically), including:

- [Go](https://go.dev/dl/): version 1.19.1
- GCC
- Make
- Python v3.7, v3.8, v3.9, or v3.10

diff --git a/site/versioned_docs/version-4.6/deployments/install-harper/linux.md b/site/versioned_docs/version-4.6/deployments/install-harper/linux.md
new file mode 100644
index 00000000..15da9a7b
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/install-harper/linux.md
@@ -0,0 +1,225 @@

---
title: On Linux
---

# On Linux

If you wish to install locally or already have a configured server, see the basic [Installation Guide](./).

The following is a recommended way to configure Linux and install Harper. These instructions should work reasonably well for any public cloud or on-premises Linux instance.

---

These instructions assume that the following has already been completed:

1. Linux is installed
1. Basic networking is configured
1. A non-root user account dedicated to Harper with sudo privileges exists
1. An additional volume for storing Harper files is attached to the Linux instance
1. Traffic to ports 9925 (Harper Operations API), 9926 (Harper Application Interface), and 9932 (Harper Clustering) is permitted

While you will need to access Harper through port 9925 for administration through the operations API, and port 9932 for clustering, for a higher level of security you may want to consider keeping both of these ports restricted to a VPN or VPC, and only have the application interface (9926 by default) exposed to the public Internet.

For this example, we will use an AWS Ubuntu Server 22.04 LTS m5.large EC2 Instance with an additional General Purpose SSD EBS volume and the default “ubuntu” user account.

---

### (Optional) LVM Configuration

Logical Volume Manager (LVM) can be used to stripe multiple disks together to form a single logical volume. If striping disks together is not a requirement, skip these steps.

Find the disk that already has a partition

```bash
used_disk=$(lsblk -P -I 259 | grep "nvme.n1.*part" | grep -o "nvme.n1")
```

Create an array of free disks

```bash
declare -a free_disks
mapfile -t free_disks < <(lsblk -P -I 259 | grep "nvme.n1.*disk" | grep -o "nvme.n1" | grep -v "$used_disk")
```

Get the quantity of free disks

```bash
free_disks_qty=${#free_disks[@]}
```

Construct the pvcreate command

```bash
cmd_string=""
for i in "${free_disks[@]}"
do
cmd_string="$cmd_string /dev/$i"
done
```

Initialize the disks for use by LVM

```bash
pvcreate_cmd="pvcreate $cmd_string"
sudo $pvcreate_cmd
```

Create the volume group

```bash
vgcreate_cmd="vgcreate hdb_vg $cmd_string"
sudo $vgcreate_cmd
```

Create the logical volume

```bash
sudo lvcreate -n hdb_lv -i $free_disks_qty -l 100%FREE hdb_vg
```

### Configure Data Volume

Run `lsblk` and note the device name of the additional volume

```bash
lsblk
```

Create an ext4 filesystem on the volume (the below commands assume the device name is nvme1n1; if you used LVM to create a logical volume, replace /dev/nvme1n1 with /dev/hdb_vg/hdb_lv)

```bash
sudo mkfs.ext4 -L hdb_data /dev/nvme1n1
```

Mount the file system and set the correct permissions for the directory

```bash
mkdir /home/ubuntu/hdb
sudo mount -t ext4 /dev/nvme1n1 /home/ubuntu/hdb
sudo chown -R ubuntu:ubuntu /home/ubuntu/hdb
sudo chmod 775 /home/ubuntu/hdb
```

Create an fstab entry to mount the filesystem on boot

```bash
echo "LABEL=hdb_data /home/ubuntu/hdb ext4 defaults,noatime 0 1" | sudo tee -a /etc/fstab
```

### Configure Linux and Install Prerequisites

If a swap file or partition does not already exist, create and enable a 2GB swap file

```bash
sudo dd if=/dev/zero of=/swapfile bs=128M count=16
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
echo "/swapfile swap swap defaults 0 0" | sudo tee -a /etc/fstab
```

Increase the open file limits for the ubuntu user

```bash
echo "ubuntu soft nofile 500000" | sudo tee -a /etc/security/limits.conf
echo "ubuntu hard nofile 1000000" | sudo tee -a /etc/security/limits.conf
```

Install Node Version Manager (nvm)

```bash
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash
```

Load nvm (or log out and then log in)

```bash
. ~/.nvm/nvm.sh
```

Install Node.js using nvm ([read more about specific Node version requirements](https://www.npmjs.com/package/harperdb#prerequisites))

```bash
nvm install
```

### Install and Start Harper

Here is an example of installing Harper with minimal configuration.

```bash
npm install -g harperdb
harperdb start \
  --TC_AGREEMENT "yes" \
  --ROOTPATH "/home/ubuntu/hdb" \
  --OPERATIONSAPI_NETWORK_PORT "9925" \
  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
  --HDB_ADMIN_PASSWORD "password"
```

Here is an example of installing Harper with commonly used additional configuration.

```bash
npm install -g harperdb
harperdb start \
  --TC_AGREEMENT "yes" \
  --ROOTPATH "/home/ubuntu/hdb" \
  --OPERATIONSAPI_NETWORK_PORT "9925" \
  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
  --HDB_ADMIN_PASSWORD "password" \
  --HTTP_SECUREPORT "9926" \
  --CLUSTERING_ENABLED "true" \
  --CLUSTERING_USER "cluster_user" \
  --CLUSTERING_PASSWORD "password" \
  --CLUSTERING_NODENAME "hdb1"
```

You can also use a custom configuration file to set values on install; use the CLI/ENV variable `HDB_CONFIG` and set it to the path of your [custom configuration file](../configuration):

```bash
npm install -g harperdb
harperdb start \
  --TC_AGREEMENT "yes" \
  --HDB_ADMIN_USERNAME "HDB_ADMIN" \
  --HDB_ADMIN_PASSWORD "password" \
  --HDB_CONFIG "/path/to/your/custom/harperdb-config.yaml"
```

#### Start Harper on Boot

Harper will automatically start after installation. If you wish Harper to start when the OS boots, you have two options:

You can set up a crontab:

```bash
(crontab -l 2>/dev/null; echo "@reboot PATH=\"/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH\" && harperdb start") | crontab -
```

Or you can create a systemd script at `/etc/systemd/system/harperdb.service`.

Paste the following contents into the file:

```
[Unit]
Description=Harper

[Service]
Type=simple
Restart=always
User=ubuntu
Group=ubuntu
WorkingDirectory=/home/ubuntu
ExecStart=/bin/bash -c 'PATH="/home/ubuntu/.nvm/versions/node/v18.15.0/bin:$PATH"; harperdb'

[Install]
WantedBy=multi-user.target
```

Then run the following:

```
systemctl daemon-reload
systemctl enable harperdb
```

For more information visit the [Harper Command Line Interface guide](../harper-cli) and the [Harper Configuration File guide](../configuration).

diff --git a/site/versioned_docs/version-4.6/deployments/upgrade-hdb-instance.md b/site/versioned_docs/version-4.6/deployments/upgrade-hdb-instance.md
new file mode 100644
index 00000000..eda8cb37
--- /dev/null
+++ b/site/versioned_docs/version-4.6/deployments/upgrade-hdb-instance.md
@@ -0,0 +1,140 @@

---
title: Upgrade a Harper Instance
---

# Upgrade a Harper Instance

This document describes best practices for upgrading self-hosted Harper instances. Harper can be upgraded using a combination of npm and built-in Harper upgrade scripts. Whenever upgrading your Harper installation, it is recommended that you make a backup of your data first. Note: this document applies to self-hosted Harper instances only. All [Harper Cloud instances](./harper-cloud/) will be upgraded by the Harper Cloud team.

## Upgrading

Upgrading Harper is a two-step process. First the latest version of Harper must be downloaded from npm, then the Harper upgrade scripts will be utilized to ensure the newest features are available on the system.

1. Install the latest version of Harper using `npm install -g harperdb`.

   Note: `-g` should only be used if you installed Harper globally (which is recommended).

1. Run `harperdb` to initiate the upgrade process.

   Harper will then prompt you for all appropriate inputs and then run the upgrade directives.

## Node Version Manager (nvm)

[Node Version Manager (nvm)](https://nvm.sh/) is an easy way to install, remove, and switch between different versions of Node.js as required by various applications. More information, including directions on installing nvm, can be found here: https://nvm.sh/.

Harper supports Node.js versions 14.0.0 and higher; however, **please check our** [**NPM page**](https://www.npmjs.com/package/harperdb) **for our recommended Node.js version.** To install a different version of Node.js with nvm, run the command:

```bash
nvm install
```

To switch to a version of Node, run:

```bash
nvm use
```

To see the current running version of Node, run:

```bash
node --version
```

With a handful of different versions of Node.js installed, run nvm with the `ls` argument to list out all installed versions:

```bash
nvm ls
```

When upgrading Harper, we recommend also upgrading your Node version. Here we assume you're running on an older version of Node; the execution may look like this:

Switch to the older version of Node that Harper is running on (if it is not the current version):

```bash
nvm use 14.19.0
```

Make sure Harper is not running:

```bash
harperdb stop
```

Uninstall Harper. Note, this step is not required, but it will clean up old artifacts of Harper. We recommend removing all other Harper installations to ensure the most recent version is always running.

```bash
npm uninstall -g harperdb
```

Switch to the newer version of Node:

```bash
nvm use
```

Install Harper globally:

```bash
npm install -g harperdb
```

Run the upgrade script:

```bash
harperdb
```

Start Harper:

```bash
harperdb start
```

---

## Upgrading NATS to Plexus in 4.4

To upgrade from NATS clustering to Plexus replication, follow these manual steps. They are designed for a fully replicating cluster to ensure minimal disruption during the upgrade process.

The core of this upgrade is the _bridge node_. This node will run both NATS and Plexus simultaneously, ensuring that transactions are relayed between the two systems during the transition. The bridge node is crucial in preventing any replication downtime, as it will handle transactions from NATS nodes to Plexus nodes and vice versa.

### Enabling Plexus

To enable Plexus on a node that is already running NATS, you will need to update [two values](./configuration) in the `harperdb-config.yaml` file:

```yaml
replication:
  url: wss://my-cluster-node-1:9925
  hostname: node-1
```

`replication.url` – This should be set to the URL of the current Harper instance.

`replication.hostname` – Since we are upgrading from NATS, this value should match the `clustering.nodeName` of the current instance.

### Upgrade Steps

1. Set up the bridge node:
   - Choose one node to be the bridge node.
   - On this node, follow the "Enabling Plexus" steps from the previous section, but **do not disable NATS clustering on this instance.**
   - Stop the instance and perform the upgrade.
   - Start the instance. This node should now be running both Plexus and NATS.
1. Upgrade a node:
   - Choose a node that needs upgrading and enable Plexus by following the "Enabling Plexus" steps.
   - Disable NATS by setting `clustering.enabled` to `false`.
   - Stop the instance and upgrade it.
   - Start the instance.
   - Call [`add_node`](../developers/operations-api/clustering#add-node) on the upgraded instance. In this call, omit `subscriptions` so that a fully replicating cluster is built. The target node for this call should be the bridge node.
     _Note: depending on your setup, you may need to expand this `add_node` call to include_ [_authorization and/or tls information_](../developers/operations-api/clustering#add-node)_._

```json
{
	"operation": "add_node",
	"hostname": "node-1",
	"url": "wss://my-cluster-node-1:9925"
}
```

1. Repeat Step 2 on all remaining nodes that need to be upgraded.
1. Disable NATS on the bridge node by setting `clustering.enabled` to `false` and restart the instance.

Your cluster upgrade should now be complete, with no NATS processes running on any of the nodes.

diff --git a/site/versioned_docs/version-4.6/developers/_category_.json b/site/versioned_docs/version-4.6/developers/_category_.json
new file mode 100644
index 00000000..9fe399bf
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/_category_.json
@@ -0,0 +1,12 @@

{
	"label": "Developers",
	"position": 1,
	"link": {
		"type": "generated-index",
		"title": "Developers Documentation",
		"description": "Comprehensive guides and references for building applications with HarperDB",
		"keywords": ["developers"]
	}
}

diff --git a/site/versioned_docs/version-4.6/developers/applications/caching.md b/site/versioned_docs/version-4.6/developers/applications/caching.md
new file mode 100644
index 00000000..62fecce4
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/caching.md
@@ -0,0 +1,292 @@

---
title: Caching
---

# Caching

Harper has integrated support for caching data from external sources. With built-in caching capabilities and distributed, high-performance, low-latency responsiveness, Harper makes an ideal data caching server. Harper can store cached data in standard tables, as queryable structured data, so data can easily be consumed in one format (for example, JSON or CSV) and provided to end users in different formats with different selected properties (for example, MessagePack, with a subset of selected properties), or even with customized querying capabilities. Harper also manages and provides timestamps/tags for proper caching control, facilitating further downstream caching. With these combined capabilities, Harper is an extremely fast, interoperable, flexible, and customizable caching server.

## Configuring Caching

To set up caching, first you will need to define a table that you will use as your cache (to store the cached data). You can review the [introduction to building applications](./) for more information on setting up the application (and the [defining schemas documentation](./defining-schemas)), but once you have defined an application folder with a schema, you can add a table for caching to your `schema.graphql`:

```graphql
type MyCache @table(expiration: 3600) @export {
	id: ID @primaryKey
}
```

You may also note that we can define a time-to-live (TTL) expiration on the table, indicating when table records/entries should expire and be evicted from this table. This is generally necessary for "passive" caches where there is no active notification of when entries expire. However, this is not needed if you provide a means of notifying when data is invalidated and changed. The units for expiration, and other duration-based properties, are in seconds.

While you can provide a single expiration time, there are actually several expiration timings that are potentially relevant, and they can be independently configured. These settings are available as directive properties on the table configuration (like `expiration` above):

- stale expiration: The point when a request for a record should trigger a request to origin (but might possibly return the current stale record, depending on policy).
- must-revalidate expiration: The point when a request for a record must make a request to origin first and return the latest value from origin.
- eviction expiration: The point when a record is actually removed from the caching table.

You can provide a single expiration and it defines the behavior for all three. You can also provide three settings for expiration, through table directives:

- expiration - The amount of time until a record goes stale.
- eviction - The amount of time after expiration before a record can be evicted (defaults to zero).
- scanInterval - The interval for scanning for expired records (defaults to one quarter of the total of expiration and eviction).
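
For instance, a table that goes stale after an hour, can be evicted ten minutes after that, and is scanned for expired records every half hour could be declared like this (a sketch combining the directives above; the values are illustrative):

```graphql
type MyCache @table(expiration: 3600, eviction: 600, scanInterval: 1800) @export {
	id: ID @primaryKey
}
```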

## Define External Data Source

Next, you need to define the source for your cache. External data sources could be HTTP APIs, other databases, microservices, or any other source of data. This can be defined as a resource class in your application's `resources.js` module. You can extend the `Resource` class (which is available as a global variable in the Harper environment) as your base class. The first method to implement is a `get()` method to define how to retrieve the source data. For example, if we were caching an external HTTP API, we might define it as such:

```javascript
class ThirdPartyAPI extends Resource {
	async get() {
		return (await fetch(`http://some-api.com/${this.getId()}`)).json();
	}
}
```

Next, we define this external data resource as the "source" for the caching table we defined above:

```javascript
const { MyCache } = tables;
MyCache.sourcedFrom(ThirdPartyAPI);
```

Now we have a fully configured and connected caching table. If you access data from `MyCache` (for example, through the REST API, like `/MyCache/some-id`), Harper will check to see if the requested entry is in the table and return it if it is available (and hasn't expired). If there is no entry, or it has expired (it is older than one hour in this case), it will go to the source, calling the `get()` method, which will then retrieve the requested entry. Once the entry is retrieved, it will be saved/cached in the caching table (for one hour, based on our expiration time).

```mermaid
flowchart TD
    Client1(Client 1)-->Cache(Caching Table)
    Client2(Client 2)-->Cache
    Cache-->Resource(Data Source Connector)
    Resource-->API(Remote Data Source API)
```

Harper handles waiting for an existing cache resolution to finish and uses its result. This prevents a "cache stampede" when entries expire, ensuring that multiple requests for a cache entry all wait on a single request to the data source.

Cache tables with an expiration are periodically pruned for expired entries. Because this is done periodically, there is usually some amount of time between when a record has expired and when the record is actually evicted (the cached data is removed). But when a record is checked for availability, the expiration time is used to determine if the record is fresh (and the cache entry can be used).

### Eviction with Indexing

Eviction is the removal of a locally cached copy of data, but it does not imply the deletion of the actual data from the canonical or origin data source.
Because evicted records still exist (just not in the local cache), if a caching table uses expiration (and eviction) and has indexing on certain attributes, the data is not removed from the indexes. The indexes that reference the evicted record are preserved, along with the attribute data necessary to maintain these indexes. Therefore, eviction means the removal of non-indexed data (in this case, evictions are stored as "partial" records). Eviction only removes the data that can be safely removed from a cache without affecting the integrity or behavior of the indexes. If a search query is performed that matches this evicted record, the record will be requested on-demand to fulfill the search query.

### Specifying a Timestamp

In the example above, we simply retrieved data to fulfill a cache request. We may want to supply the timestamp of the record we are fulfilling as well. This can be set on the context for the request:

```javascript
class ThirdPartyAPI extends Resource {
	async get() {
		let response = await fetch(`http://some-api.com/${this.getId()}`);
		this.getContext().lastModified = response.headers.get('Last-Modified');
		return response.json();
	}
}
```

#### Specifying an Expiration

In addition, we can also specify when a cached record "expires". When a cached record expires, this means that a request for that record will trigger a request to the data source again. This does not necessarily mean that the cached record has been evicted (removed), although expired records will be periodically evicted. If the cached record still exists, the data source can revalidate it and return it. For example:

```javascript
class ThirdPartyAPI extends Resource {
	async get() {
		const context = this.getContext();
		let headers = new Headers();
		if (context.replacingVersion) // this is the existing cached record
			headers.set('If-Modified-Since', new Date(context.replacingVersion).toUTCString());
		let response = await fetch(`http://some-api.com/${this.getId()}`, { headers });
		let cacheInfo = response.headers.get('Cache-Control');
		let maxAge = cacheInfo?.match(/max-age=(\d+)/)?.[1];
		if (maxAge) // we can set a specific expiration time by setting context.expiresAt
			context.expiresAt = Date.now() + maxAge * 1000; // convert from seconds to milliseconds and add to current time
		// we can just revalidate and return the record if the origin has confirmed that it has the same version:
		if (response.status === 304) return context.replacingRecord;
		...
```

## Active Caching and Invalidation

The cache we have created above is a "passive" cache; it only pulls data from the data source as needed, and has no knowledge of if and when data from the data source has actually changed, so it must rely on timer-based expiration to periodically retrieve possibly updated data. This means that it is possible that the cache may have stale data for a while (if the underlying data has changed, but the cached data hasn't expired), and the cache may refresh more than necessary if the data source data hasn't changed. Consequently, it can be significantly more effective to implement an "active" cache, in which the data source is monitored and notifies the cache when any data changes. This ensures that when data changes, the cache can immediately load the updated data, and unchanged data can remain cached much longer (or indefinitely).

### Invalidate

One way to provide more active caching is to specifically invalidate individual records.
Invalidation is useful when you know the source data has changed, and the cache needs to re-retrieve data from the source the next time that record is accessed. This can be done by executing the `invalidate()` method on a resource. For example, you could extend a table (in your resources.js) and provide a custom POST handler that does invalidation:

```javascript
const { MyTable } = tables;
export class MyTableEndpoint extends MyTable {
	async post(data) {
		if (data.invalidate)
			// use this flag as a marker
			this.invalidate();
	}
}
```

(Note that if you are now exporting this endpoint through resources.js, you don't necessarily need to directly export the table separately in your schema.graphql.)

### Subscriptions

We can provide more control of an active cache with subscriptions. If there is a way to receive notifications from the external data source of data changes, we can implement this data source as an "active" data source for our cache by implementing a `subscribe` method. A `subscribe` method should return an asynchronous iterable that iterates and returns events indicating the updates. One straightforward way of creating an asynchronous iterable is by defining the `subscribe` method as an asynchronous generator. If we had an endpoint that we could poll for changes every second, we could implement this like:

```javascript
class ThirdPartyAPI extends Resource {
	async *subscribe() {
		while (true) { // every second retrieve more data
			// get the next data change event from the source
			// (this assumes the endpoint responds with the id, value, and timestamp of the change)
			let update = await (await fetch(`http://some-api.com/latest-update`)).json();
			const event = { // define the change event (which will update the cache)
				type: 'put', // this would indicate that the event includes the new data value
				id: update.id, // the primary key of the record that updated
				value: update.value, // the new value of the record that updated
				timestamp: update.timestamp, // the timestamp of when the data change occurred
			};
			yield event; // this returns this event, notifying the cache of the change
			await new Promise((resolve) => setTimeout(resolve, 1000)); // wait a second before polling again
		}
	}
	async get() {
...
```

Notification events should always include an `id` property to indicate the primary key of the updated record. The event should have a `value` property for `put` and `message` event types. The `timestamp` is optional and can be used to indicate the exact timestamp of the change. The following event `type`s are supported:

- `put` - This indicates that the record has been updated and provides the new value of the record.
- `invalidate` - Alternately, you can notify with an event type of `invalidate` to indicate that the data has changed, but without the overhead of actually sending the data (the `value` property is not needed), so the data only needs to be sent if and when the data is requested through the cache. An `invalidate` will evict the entry and update the timestamp to indicate that there is new data that should be requested (if needed).
- `delete` - This indicates that the record has been deleted.
- `message` - This indicates a message is being passed through the record. The record value has not changed, but this is used for [publish/subscribe messaging](../real-time).
- `transaction` - This indicates that there are multiple writes that should be treated as a single atomic transaction. These writes should be included as an array of data notification events in the `writes` property.

And the following properties can be defined on event objects:

- `type`: The event type as described above.
- `id`: The primary key of the record that updated.
- `value`: The new value of the record that updated (for `put` and `message`).
- `writes`: An array of event properties that are part of a transaction (used in conjunction with the `transaction` event type).
- `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
- `timestamp`: The timestamp of when the data change occurred.
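
For example, a `transaction` event that atomically applies a put to one table and a delete to another might look like this (a sketch; the table names, ids, and values are placeholders):

```javascript
const event = {
	type: 'transaction',
	timestamp: Date.now(),
	writes: [
		{ type: 'put', table: 'Product', id: 'product-1', value: { name: 'New name' } },
		{ type: 'delete', table: 'Review', id: 'review-2' },
	],
};
```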
- `id`: The primary key of the record that was updated
- `value`: The new value of the record that was updated (for `put` and `message` events)
- `writes`: An array of event objects that are part of a transaction (used in conjunction with the `transaction` event type)
- `table`: The name of the table with the record that was updated. This can be used with events within a transaction to specify events across multiple tables.
- `timestamp`: The timestamp of when the data change occurred

With an active external data source that has a `subscribe` method, the data source will proactively notify the cache, ensuring a fresh and efficient active cache. Note that with an active data source, we still use the `sourcedFrom` method to register the source for a caching table, and the table will automatically detect and call the `subscribe` method on the data source.

By default, Harper will only run the `subscribe` method on one thread. Harper is multi-threaded and normally runs many concurrent worker threads, but running a subscription on multiple threads can introduce overlapping notifications and race conditions, so running a subscription on a single thread is typically preferable. However, if you want to enable subscribing on multiple threads, you can define a `static subscribeOnThisThread` method to specify whether the subscription should run on the current thread:

```javascript
class ThirdPartyAPI extends Resource {
	static subscribeOnThisThread(threadIndex) {
		return threadIndex < 2; // run on the first two threads
	}
	async *subscribe() {
		...
```

An alternative to using asynchronous generators is to use a subscription stream and send events to it. A default subscription stream (that doesn't generate its own events) is available from the Resource's default subscribe method:

```javascript
class ThirdPartyAPI extends Resource {
	subscribe() {
		const subscription = super.subscribe();
		setupListeningToRemoteService().on('update', (event) => {
			subscription.send(event);
		});
		return subscription;
	}
}
```

## Downstream Caching

It is highly recommended that you utilize the [REST interface](../rest) for accessing caching tables, as it facilitates downstream caching for clients. Timestamps are recorded with all cached entries and are used by incoming [REST requests to specify the `ETag` in the response](../rest#cachingconditional-requests). Clients can cache data themselves and send requests using the `If-None-Match` header to conditionally get a 304 and preserve their cached data, based on the timestamp/`ETag` of the entries that are cached in Harper. Caching tables also have [subscription capabilities](./caching#subscribing-to-caching-tables), which means that downstream caches can be fully "layered" on top of Harper, as either passive or active caches.
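For example, a client that has previously cached an entry can revalidate it with a conditional request; the resource path and `ETag` value here are illustrative:

```http
GET /my-table/some-id HTTP/1.1
If-None-Match: "1703097600000"
```

If the entry's timestamp still matches, the response is a 304 with no body, and the client keeps serving its own cached copy.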
## Write-Through Caching

The cache we have defined so far only has data flowing from the data source to the cache. However, you may wish to support write methods, so that writes to the cache table flow through to the underlying canonical data source, as well as populate the cache. This can be accomplished by implementing the standard write methods, like `put` and `delete`. If you are using an API with standard RESTful methods, you can pass writes through to the data source like this:

```javascript
class ThirdPartyAPI extends Resource {
	async put(data) {
		await fetch(`http://some-api.com/${this.getId()}`, {
			method: 'PUT',
			body: JSON.stringify(data),
		});
	}
	async delete() {
		await fetch(`http://some-api.com/${this.getId()}`, {
			method: 'DELETE',
		});
	}
	...
```

When doing an insert or update to the MyCache table, the data will be sent to the underlying data source through the `put` method, and the new record value will be stored in the cache as well.

### Loading from Source in Methods

When you are using a caching table, it is important to remember that any resource method besides `get()` will not automatically load data from the source. If you have defined a `put()`, `post()`, or `delete()` method and you need the source data, you can ensure it is loaded by calling the `ensureLoaded()` method. For example, if you want to modify the existing record from the source, adding a property to it:

```javascript
class MyCache extends tables.MyCache {
	async post(data) {
		// if the data is not cached locally, retrieve it from the source:
		await this.ensureLoaded();
		// now we can be sure that the data is loaded, and can access properties
		this.quantity = this.quantity - data.purchases;
	}
}
```

### Subscribing to Caching Tables

You can subscribe to a caching table just like any other table. The one difference is that normal tables do not usually have `invalidate` events, but an active caching table may have `invalidate` events. Again, this event type gives listeners an opportunity to choose whether or not to actually retrieve the value that changed.

### Passive-Active Updates

With our passive update examples, we have provided a data source handler with a `get()` method that returns the specific requested record as the response. However, we can also actively update other records in our response handler (if our data source provides data that should be propagated to other related records). This can be done transactionally, to ensure that all updates occur atomically. The context that is provided to the data source holds the transaction information, so we can simply pass the context to any update/write methods that we call. For example, let's say we are loading a blog post, which also includes comment records:

```javascript
const { Post, Comment } = tables;
class BlogSource extends Resource {
	async get() {
		const post = await (await fetch(`http://my-blog-server/${this.getId()}`)).json();
		for (let comment of post.comments) {
			await Comment.put(comment, this); // save this comment as part of our current context and transaction
		}
		return post;
	}
}
Post.sourcedFrom(BlogSource);
```

Here both the update to the post and the updates to the comments will be atomically/transactionally committed together with the same timestamp.

## Cache-Control header

When interacting with cached data, you can also use the `Cache-Control` request header to specify certain caching behaviors. When performing a PUT (or POST) request, you can use the `max-age` directive to indicate how long the resource should be cached (until stale):

```http
PUT /my-resource/id
Cache-Control: max-age=86400
```

You can use the `only-if-cached` directive on GET requests to only return a resource if it is cached (otherwise a 504 will be returned).
Note that if the entry is not cached, this will still trigger a request to the data source for the source data. If you do not want source data retrieved, you can add the `no-store` directive. You can also use the `no-cache` directive if you do not want to use the cached resource. If you want to check whether there is a cached resource without triggering a request to the data source:

```http
GET /my-resource/id
Cache-Control: only-if-cached, no-store
```

You may also use the `stale-if-error` directive to indicate that it is acceptable to return a stale cached resource when the data source returns an error (network connection error, 500, 502, 503, or 504). The `must-revalidate` directive indicates that a stale cached resource can not be returned, even when the data source has an error (by default, a stale cached resource is returned when there is a network connection error).

## Caching Flow

It may be helpful to understand the flow of a cache request. When a request is made to a caching table:

- Harper will first create a resource instance to handle the process, and ensure that the data is loaded for the resource instance. To do this, it will first check if the record is in the table/cache.
  - If the record is not in the cache, Harper will first check if there is a current request to get the record from the source. If there is, Harper will wait for that request to complete and return the record from the cache.
  - If not, Harper will call the `get()` method on the source to retrieve the record. The record will then be stored in the cache.
  - If the record is in the cache, Harper will check if the record is stale. If the record is not stale, Harper will immediately return the record from the cache. If the record is stale, Harper will call the `get()` method on the source to retrieve the record.
  - The record will then be stored in the cache. This writes the record to the cache in a separate asynchronous/background write-behind transaction, so it does not block the current request, and the data is returned immediately once it is available.
- The `get()` method will be called on the resource instance to return the record to the client (or perform any querying on the record). If this method is overridden, it will be called at this time.

### Caching Flow with Write-Through

When writes are performed on a caching table (in a `put()` or `post()` method, for example), the flow is slightly different:

- Harper will have first created a resource instance to handle the process, and this resource instance will be the current `this` for a call to `put()` or `post()`.
- If a `put()` or `update()` is called, for example, this action will be recorded in the current transaction.
- Once the transaction is committed (which is done automatically as the request handler completes), the transaction write will be sent to the source to update the data.
  - The local writes will wait for the source to confirm the writes have completed (note that this effectively allows you to perform a two-phase transactional write to the source, and the source can confirm the writes have completed before the transaction is committed locally).
  - The transaction writes will then be written to the local caching table.
- The transaction handler will wait for the local commit to be written, then the transaction will be resolved and a response will be sent to the client.
diff --git a/site/versioned_docs/version-4.6/developers/applications/data-loader.md b/site/versioned_docs/version-4.6/developers/applications/data-loader.md
new file mode 100644
index 00000000..488b7b19
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/data-loader.md
@@ -0,0 +1,176 @@
---
title: Data Loader
---

# Data Loader

The Data Loader is a built-in component that provides a reliable mechanism for loading data from JSON or YAML files into Harper tables as part of component deployment. This feature is particularly useful for ensuring specific records exist in your database when deploying components, such as seed data, configuration records, or initial application data.

## Configuration

To use the Data Loader, first specify your data files in the `config.yaml` in your component directory:

```yaml
dataLoader:
  files: 'data/*.json'
```

The Data Loader is an [Extension](../../technical-details/reference/components#extensions) and supports the standard `files` configuration option.

## Data File Format

Data files can be structured as either JSON or YAML files containing the records you want to load. Each data file must specify records for a single table - if you need to load data into multiple tables, create separate data files for each table.

### Basic Example

Create a data file in your component's data directory (one table per file):

```json
{
	"database": "myapp",
	"table": "users",
	"records": [
		{
			"id": 1,
			"username": "admin",
			"email": "admin@example.com",
			"role": "administrator"
		},
		{
			"id": 2,
			"username": "user1",
			"email": "user1@example.com",
			"role": "standard"
		}
	]
}
```

### Multiple Tables

To load data into multiple tables, create separate data files for each table:

**users.json:**

```json
{
	"database": "myapp",
	"table": "users",
	"records": [
		{
			"id": 1,
			"username": "admin",
			"email": "admin@example.com"
		}
	]
}
```

**settings.yaml:**

```yaml
database: myapp
table: settings
records:
  - id: 1
    setting_name: app_name
    setting_value: My Application
  - id: 2
    setting_name: version
    setting_value: "1.0.0"
```

## File Organization

You can organize your data files in various ways:

### Single File Pattern

```yaml
dataLoader:
  files: 'data/seed-data.json'
```

### Multiple Files Pattern

```yaml
dataLoader:
  files:
    - 'data/users.json'
    - 'data/settings.yaml'
    - 'data/initial-products.json'
```

### Glob Pattern

```yaml
dataLoader:
  files: 'data/**/*.{json,yaml,yml}'
```

## Loading Behavior

When Harper starts up with a component that includes the Data Loader:

1. The Data Loader reads all specified data files (JSON or YAML)
1. For each file, it validates that a single table is specified
1. Records are inserted or updated based on timestamp comparison:
   - New records are inserted if they don't exist
   - Existing records are updated only if the data file's modification time is newer than the record's updated time
   - This ensures data files can be safely reloaded without overwriting newer changes
1. If records with the same primary key already exist, updates occur only when the file is newer

Note: While the Data Loader can create tables automatically by inferring the schema from the provided records, it's recommended to define your table schemas explicitly using the [graphqlSchema](../applications/defining-schemas) component for better control and type safety.
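For instance, a minimal explicit schema for the `users` table from the example above might look like this (the attribute types are assumptions based on the sample records):

```graphql
type Users @table(table: "users", database: "myapp") {
	id: Int @primaryKey
	username: String
	email: String
	role: String
}
```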
## Best Practices

1. **Define Schemas First**: While the Data Loader can infer schemas, it's strongly recommended to define your table schemas and relations explicitly using the [graphqlSchema](../applications/defining-schemas) component before loading data. This ensures proper data types, constraints, and relationships between tables.

1. **One Table Per File**: Remember that each data file can only load records into a single table. Organize your files accordingly.

1. **Idempotency**: Design your data files to be idempotent - they should be safe to load multiple times without creating duplicate or conflicting data.

1. **Version Control**: Include your data files in version control to ensure consistency across deployments.

1. **Environment-Specific Data**: Consider using different data files for different environments (development, staging, production).

1. **Data Validation**: Ensure your data files are valid JSON or YAML and match your table schemas before deployment.

1. **Sensitive Data**: Avoid including sensitive data like passwords or API keys directly in data files. Use environment variables or secure configuration management instead.

## Example Component Structure

```
my-component/
├── config.yaml
├── data/
│   ├── users.json
│   ├── roles.json
│   └── settings.json
├── schemas.graphql
└── roles.yaml
```

With this structure, your `config.yaml` might look like:

```yaml
# Load environment variables first
loadEnv:
  files: '.env'

# Define schemas
graphqlSchema:
  files: 'schemas.graphql'

# Define roles
roles:
  files: 'roles.yaml'

# Load initial data
dataLoader:
  files: 'data/*.json'

# Enable REST endpoints
rest: true
```

## Related Documentation

- [Built-In Components](../../technical-details/reference/components/built-in-extensions)
- [Extensions](../../technical-details/reference/components/extensions)
- [Bulk Operations](../operations-api/bulk-operations) - For loading data via the Operations API
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.6/developers/applications/debugging.md b/site/versioned_docs/version-4.6/developers/applications/debugging.md
new file mode 100644
index 00000000..bd9d2622
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/debugging.md
@@ -0,0 +1,39 @@
---
title: Debugging Applications
---

# Debugging Applications

Harper components and applications run inside the Harper process, which is a standard Node.js process that can be debugged with standard JavaScript development tools like Chrome's devtools, VSCode, and WebStorm. Debugging can be performed by launching the Harper entry script with your IDE, or you can start Harper in dev mode and connect your debugger to the running process (which listens on the standard 9229 port by default):

```
harperdb dev
# or to run and debug a specific app
harperdb dev /path/to/app
```

Once you have connected a debugger, you may set breakpoints in your application and fully debug it. Note that the `dev` command runs Harper in single-threaded mode; this would not be appropriate for production use, but makes it easier to debug applications.

For local debugging and development, it is recommended that you use standard console log statements for logging. For production use, you may want to use Harper's logging facilities so you aren't logging to the console. The logging functions are available on the global `logger` variable that is provided by Harper.
This logger can be used to output messages directly to the Harper log using standardized logging level functions, described below. The log level can be set in the [Harper Configuration File](../../deployments/configuration).

Harper Logger Functions:

- `trace(message)`: Write a 'trace' level log, if the configured level allows for it.
- `debug(message)`: Write a 'debug' level log, if the configured level allows for it.
- `info(message)`: Write an 'info' level log, if the configured level allows for it.
- `warn(message)`: Write a 'warn' level log, if the configured level allows for it.
- `error(message)`: Write an 'error' level log, if the configured level allows for it.
- `fatal(message)`: Write a 'fatal' level log, if the configured level allows for it.
- `notify(message)`: Write a 'notify' level log.

For example, you can log a warning:

```javascript
logger.warn('You have been warned');
```

If you want to ensure a message is logged, you can use `notify`, as these messages will appear in the log regardless of the configured log level.

## Viewing the Log

The Harper log can be found in your local `~/hdb/log/hdb.log` file (or in the log folder if you have specified an alternate hdb root), or in the Studio Status page. Additionally, you can use the [`read_log` operation](../operations-api/logs) to query the Harper log.
diff --git a/site/versioned_docs/version-4.6/developers/applications/define-routes.md b/site/versioned_docs/version-4.6/developers/applications/define-routes.md
new file mode 100644
index 00000000..4351992d
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/define-routes.md
@@ -0,0 +1,119 @@
---
title: Define Fastify Routes
---

# Define Fastify Routes

Harper's applications provide an extension for loading [Fastify](https://www.fastify.io/) routes as a way to handle endpoints. While we generally recommend building your endpoints/APIs with Harper's [REST interface](../rest) for better performance and standards compliance, Fastify routes can provide an extensive API for highly customized path handling. Below is a very simple example of a route declaration.

The Fastify route handler can be configured in your application's config.yaml (this is the default config if you used the [application template](https://github.com/HarperDB/application-template)):

```yaml
fastifyRoutes: # This loads files that define fastify routes using fastify's auto-loader
  files: routes/*.js # specify the location of route definition modules
  path: . # relative to the app-name, like http://server/app-name/route-name
```

By default, route URLs are configured to be:

- \[**Instance URL**]:\[**HTTP Port**]/\[**Project Name**]/\[**Route URL**]

However, you can specify the path to be `/` if you wish to have your routes handle the root path of incoming URLs.

- The route below, using the default config, within the **dogs** project, with a route of **breeds**, would be available at **http://localhost:9926/dogs/breeds**.

In effect, this route is just a pass-through to Harper. The same result could have been achieved by hitting the core Harper API, since it uses **hdbCore.preValidation** and **hdbCore.request**, which are defined in the “helper methods” section, below.
```javascript
export default async (server, { hdbCore, logger }) => {
	server.route({
		url: '/',
		method: 'POST',
		preValidation: hdbCore.preValidation,
		handler: hdbCore.request,
	});
};
```

## Custom Handlers

For endpoints where you want to execute multiple operations against Harper, or perform additional processing (like an ML classification, an aggregation, or a call to a 3rd party API), you can define your own logic in the handler. The function below will execute a query against the dogs table and filter the results to only return those dogs over 4 years in age.

**IMPORTANT: This route has NO preValidation and uses hdbCore.requestWithoutAuthentication, which, as the name implies, bypasses all user authentication. See the security concerns and mitigations in the “helper methods” section, below.**

```javascript
export default async (server, { hdbCore, logger }) => {
	server.route({
		url: '/:id',
		method: 'GET',
		handler: async (request) => {
			request.body = {
				operation: 'sql',
				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`,
			};

			const result = await hdbCore.requestWithoutAuthentication(request);
			return result.filter((dog) => dog.age > 4);
		},
	});
};
```

## Custom preValidation Hooks

The simple example above was just a pass-through to Harper; the exact same result could have been achieved by hitting the core Harper API. But for many applications, you may want to authenticate the user using custom logic you write, or by conferring with a 3rd party service. Custom preValidation hooks let you do just that.

Below is an example of a route that uses a custom validation hook:

```javascript
import customValidation from '../helpers/customValidation';

export default async (server, { hdbCore, logger }) => {
	server.route({
		url: '/:id',
		method: 'GET',
		preValidation: (request) => customValidation(request, logger),
		handler: (request) => {
			request.body = {
				operation: 'sql',
				sql: `SELECT * FROM dev.dog WHERE id = ${request.params.id}`,
			};

			return hdbCore.requestWithoutAuthentication(request);
		},
	});
};
```

Notice we imported customValidation from the **helpers** directory. To include a helper, and to see the actual code within customValidation, see [Helper Methods](./define-routes#helper-methods).

## Helper Methods

When declaring routes, you are given access to 2 helper methods: hdbCore and logger.

**hdbCore**

hdbCore contains three functions that allow you to authenticate an inbound request, and execute operations against Harper directly, bypassing the standard Operations API.

- **preValidation**

  This is an array of functions used for Fastify authentication. The second function takes the authorization header from the inbound request and executes the same authentication as the standard Harper Operations API (for example, `hdbCore.preValidation[1](req, resp, callback)`). It will determine if the user exists, and if they are allowed to perform this operation. **If you use the request method, you have to use preValidation to get the authenticated user**.

- **request**

  This will execute a request with Harper using the Operations API. The `request.body` should contain a standard Harper operation and must also include the `hdb_user` property that was in `request.body` provided in the callback.

- **requestWithoutAuthentication**

  Executes a request against Harper without any security checks around whether the inbound user is allowed to make this request.
  For security purposes, you should always take the following precautions when using this method:
  - Properly handle user-submitted values, including URL params. User-submitted values should only be used for `search_value` and for defining values in records. Special care should be taken to properly escape any values if user-submitted values are used for SQL.

**logger**

This helper allows you to write directly to the log file, hdb.log. It’s useful for debugging during development, although you may also use the console logger. There are 5 functions contained within logger, each of which pertains to a different **logging.level** configuration in your harperdb-config.yaml file.

- logger.trace(‘Starting the handler for /dogs’)
- logger.debug(‘This should only fire once’)
- logger.warn(‘This should never ever fire’)
- logger.error(‘This did not go well’)
- logger.fatal(‘This did not go very well at all’)
diff --git a/site/versioned_docs/version-4.6/developers/applications/defining-roles.md b/site/versioned_docs/version-4.6/developers/applications/defining-roles.md
new file mode 100644
index 00000000..55dd5885
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/defining-roles.md
@@ -0,0 +1,54 @@
---
title: Defining Roles
---

In addition to [defining a database schema](./defining-schemas), you can also define roles in your application. Roles are a way to group permissions together and assign them to users as part of Harper's [role based access control](../security/users-and-roles). An application component may declare roles that should exist for the application in a roles configuration file. To use this, first specify your roles config file in the `config.yaml` in your application directory:

```yaml
roles:
  files: roles.yaml
```

Now you can create a roles.yaml in your application directory:

```yaml
declared-role:
  super_user: false # This is a boolean value that indicates if the role is a super user or not
  # Now we can grant the permissions to databases; here we grant permissions to the default data database
  data: # This is the same structure as the role object that is used in the roles operations APIs
    TableOne:
      read: true
      insert: true
    TableTwo:
      read: true
      insert: false
      update: true
      delete: true
      attributes:
        name:
          read: true
          insert: false
          update: true
```

With this in place, when Harper starts up, it will create the roles in the roles.yaml file if they do not already exist. If they do exist, it will update the roles with the new permissions. This allows you to manage your roles in your application code and have them automatically created or updated when the application starts.
The structure of the roles.yaml file is:

```yaml
<role-name>:
  permission: # contains the permissions for the role; this structure is optional, and you can place flags like super_user here as a shortcut
    super_user: # true or false
  <database-name>: # each database with permissions can be added as a named property on the role
    tables: # this structure is optional, and table names can be placed directly under the database as a shortcut
      <table-name>:
        read: # indicates if the role has read permission for this table
        insert: # indicates if the role has insert permission for this table
        update: # indicates if the role has update permission for this table
        delete: # indicates if the role has delete permission for this table
        attributes:
          <attribute-name>: # individual attributes can have permissions as well
            read:
            insert:
            update:
```
diff --git a/site/versioned_docs/version-4.6/developers/applications/defining-schemas.md b/site/versioned_docs/version-4.6/developers/applications/defining-schemas.md
new file mode 100644
index 00000000..5337603b
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/defining-schemas.md
@@ -0,0 +1,272 @@
---
title: Defining Schemas
---

# Defining Schemas

Schemas define tables and their attributes. Schemas can be declaratively defined in Harper using GraphQL schema definitions. Schema definitions can be used to ensure that tables required by applications exist and have the appropriate attributes. Schemas can define the primary key, data types for attributes, whether they are required, and which attributes should be indexed. The [introduction to applications](./) provides a helpful introduction to how to use schemas as part of database application development.

Schemas can be used to define the expected structure of data, but they are also highly flexible: they support heterogeneous data structures and by default allow data to include additional properties. The standard types for GraphQL schemas are specified in the [GraphQL schema documentation](https://graphql.org/learn/schema/).

An example schema that defines a couple of tables might look like:

```graphql
# schema.graphql:
type Dog @table {
	id: ID @primaryKey
	name: String
	breed: String
	age: Int
}

type Breed @table {
	id: ID @primaryKey
}
```

In this example, you can see that we specified the expected data structure for records in the Dog and Breed tables. For example, this will enforce that Dog records are required to have a `name` property with a string (or null, unless the type is specified as non-nullable). This does not preclude records from having additional properties (see `@sealed` for preventing additional properties). For example, some Dog records could also optionally include a `favoriteTrick` property.

On this page, we will describe the specific directives that Harper uses for defining tables and attributes in a schema.

### Type Directives

#### `@table`

Schemas for tables are defined using GraphQL type definitions with a `@table` directive:

```graphql
type TableName @table
```

By default, the table name is inherited from the type name (in this case the table name would be "TableName"). The `@table` directive supports several optional arguments (all of these are optional and can be freely combined):

- `@table(table: "table_name")` - This allows you to explicitly specify the table name.
- `@table(database: "database_name")` - This allows you to specify which database the table belongs to. This defaults to the "data" database.
- `@table(expiration: 3600)` - Sets an expiration time on entries in the table before they are automatically cleared (primarily useful for caching tables). This is specified in seconds.
- `@table(audit: true)` - This enables the audit log for the table so that a history of record changes is recorded. The default comes from the [configuration file's setting for `auditLog`](../../deployments/configuration#logging).

Database naming: the default "data" database is generally a good choice for tables in applications that will not be reused in other applications (and don't need to worry about staying in a separate namespace). Applications with many tables may wish to organize the tables into separate databases (but remember that transactions do not preserve atomicity across different databases, only across tables in the same database). For components that are designed for re-use, it is recommended that you use a database name that is specific to the component (e.g. "my-component-data") to avoid name collisions with other components.

#### `@export`

This indicates that the specified table should be exported as a resource that is accessible as an externally available endpoint, through REST, MQTT, or any of the external resource APIs.

This directive also accepts a `name` parameter to specify the name that should be used for the exported resource (how it will appear in the URL path). For example:

```
type MyTable @table @export(name: "my-table")
```

This table would be available at the URL path `/my-table/`. Without the `name` parameter, the exported name defaults to the name of the table type ("MyTable" in this example).

### Relationships: `@relationship`

Defining relationships is the foundation of using "join" queries in Harper. A relationship defines how one table relates to another table using a foreign key. The `@relationship` directive defines a property as a computed property, which resolves to a record/instance of a target type based on the referenced attribute, which can be in this table or the target table. The `@relationship` directive must be used in combination with an attribute whose type references another table.

#### `@relationship(from: attribute)`

This defines a relationship where the foreign key is defined in this table and relates to the primary key of the target table. If the foreign key is single-valued, this establishes a many-to-one relationship with the target table. The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship. For example, we can define a foreign key that references another table and then define the relationship. Here we create a `brandId` attribute that will be our foreign key (it will hold an id that references the primary key of the Brand table), and we define a relationship to the `Brand` table through the `brand` attribute:

```graphql
type Product @table @export {
	id: ID @primaryKey
	brandId: ID @indexed
	brand: Brand @relationship(from: brandId)
}
type Brand @table @export {
	id: ID @primaryKey
}
```

Once this is defined, we can use the `brand` attribute as a [property in our product instances](../../technical-details/reference/resources/), allowing us to query by `brand` and select brand attributes as returned properties in [query results](../rest).

Again, the foreign key may be a multi-valued array (an array of keys referencing the target table records).
For example, if we had a list of features that references a Feature table:

```graphql
type Product @table @export {
	id: ID @primaryKey
	featureIds: [ID] @indexed # array of ids
	features: [Feature] @relationship(from: featureIds) # array of referenced feature records
}
type Feature @table {
	id: ID @primaryKey
	...
}
```

#### `@relationship(to: attribute)`

This defines a relationship where the foreign key is defined in the target table and relates to the primary key of this table. If the foreign key is single-valued, this establishes a one-to-many relationship with the target table. Note that the target table type must be an array element type (like `[Table]`). The foreign key may also be a multi-valued array, in which case this will be a many-to-many relationship. For example, we can define a reciprocal relationship from the example above, adding a relationship from brand back to product. Here we continue to use the `brandId` attribute from the `Product` schema, and we define a relationship to the `Product` table through the `products` attribute:

```graphql
type Brand @table @export {
	id: ID @primaryKey
	name: String
	products: [Product] @relationship(to: brandId)
}
```

Once this is defined, we can use the `products` attribute as a property in our brand instances, allowing us to query by `products` and select product attributes as returned properties in query results.

Note that schemas can also reference themselves with relationships, allowing records to define relationships like parent-child relationships between records in the same table. Also note that for a many-to-many relationship, you must not combine the `to` and `from` properties in the same relationship directive.

### Computed Properties: `@computed`

The `@computed` directive specifies that a field is computed based on other fields in the record. This is useful for creating derived fields that are not stored in the database, but are computed when the field is queried or accessed. The `@computed` directive must be paired with an expression or function that computes the value of the field. For example:

```graphql
type Product @table {
	id: ID @primaryKey
	price: Float
	taxRate: Float
	totalPrice: Float @computed(from: "price + (price * taxRate)")
}
```

The `from` argument specifies the expression that computes the value of the field. The expression can reference other fields in the record, and is evaluated when the record is queried or indexed.

The computed function may also be defined in a JavaScript module, which is useful for more complex computations. You can declare a computed attribute in the schema, and then define the function with the `setComputedAttribute` method. For example:

```graphql
type Product @table {
	...
	totalPrice: Float @computed
}
```

```javascript
tables.Product.setComputedAttribute('totalPrice', (record) => {
	return record.price + record.price * record.taxRate;
});
```

Computed properties may also be indexed, which provides a powerful mechanism for creating indexes on derived fields with custom querying capabilities. This enables composite indexes, custom full-text indexing, vector indexing, and other custom indexing strategies. A computed property can be indexed by adding the `@indexed` directive to the computed property.
When using a JavaScript module for a computed property that is indexed, it is highly recommended that you specify a `version` argument to ensure that the computed attribute is re-evaluated when the function is updated. For example:

```graphql
type Product @table {
	...
	totalPrice: Float @computed(version: 1) @indexed
}
```

If you update the `setComputedAttribute` function for the `totalPrice` attribute to use a new formula, you must increment the `version` argument to ensure that the computed attribute is re-indexed (note that on a large database, re-indexing may be a lengthy operation). Failing to increment the `version` argument with a modified function can result in an inconsistent index. The computed function must be deterministic and should not have side effects, as it may be re-evaluated multiple times during indexing.

Note that computed properties will not be included in query results by default; you must explicitly include them using the `select` query function.

As another example of a computed custom index, we could index all the comma-separated words in a `tags` property (similar techniques are used for full-text indexing):

```graphql
type Product @table {
	id: ID @primaryKey
	tags: String # comma delimited set of tags
	tagsSeparated: String[] @computed(from: "tags.split(/\\s*,\\s*/)") @indexed # split and index the tags
}
```

For more in-depth information on computed properties, visit our blog [here](https://www.harpersystems.dev/development/tutorials/how-to-create-custom-indexes-with-computed-properties).

### Field Directives

Field directives provide information about each attribute in a table type definition.

#### `@primaryKey`

The `@primaryKey` directive specifies that an attribute is the primary key for a table. Primary keys must be unique, and when records are created, the key will be auto-generated if no primary key is provided. When a primary key is auto-generated, it will be a UUID (as a string) if the primary key type is `String` or `ID`. If the primary key type is `Int`, `Long`, or `Any`, the primary key will be an auto-incremented number. Using numeric primary keys is more efficient than using UUIDs. Note that if the type is `Int`, the primary key will be limited to 32 bits, which can be limiting and problematic for large tables. It is recommended that if you will be relying on auto-generated keys, you use a primary key type of `Long` or `Any` (the latter will also allow you to use strings as primary keys).

#### `@indexed`

The `@indexed` directive specifies that an attribute should be indexed. When an attribute is indexed, Harper will create a secondary index from the data in this field for fast/efficient querying using this field. This is necessary if you want to execute queries using this attribute (whether that is through RESTful query parameters, SQL, or NoSQL operations).

A standard index will index the values in each field, so you can query directly by those values. If the field's value is an array, each of the values in the array will be indexed (you can query by any individual value).
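For example, a sketch of a table combining these directives, using an auto-incremented numeric primary key and a secondary index for querying (the table and attribute names are hypothetical):

```graphql
type Owner @table {
	id: Long @primaryKey # auto-incremented number if not provided
	name: String @indexed # enables efficient queries by name
	notes: String # not indexed, so it cannot be used as a query condition
}
```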
#### Vector Indexing

The `@indexed` directive can also specify a `type`. To use vector indexing, you can specify the `type` as `HNSW` for Hierarchical Navigable Small World indexing. This will create a vector index for the attribute. For example:

```graphql
type Product @table {
	id: Long @primaryKey
	textEmbeddings: [Float] @indexed(type: "HNSW")
}
```

HNSW indexing finds the nearest neighbors to a search vector. To use this, you can query with a `sort` parameter, for example:

```javascript
let results = Product.search({
	sort: { attribute: 'textEmbeddings', target: searchVector },
	limit: 5, // get the five nearest neighbors
});
```

This can be used in combination with other conditions as well, for example:

```javascript
let results = Product.search({
	conditions: [{ attribute: 'price', comparator: 'lt', value: 50 }],
	sort: { attribute: 'textEmbeddings', target: searchVector },
	limit: 5, // get the five nearest neighbors
});
```

HNSW supports several additional arguments to the `@indexed` directive to adjust the HNSW parameters:

- `distance` - Defines the distance function. This can be set to 'euclidean' or 'cosine' (uses the negative of cosine similarity). The default is cosine.
- `efConstruction` - Maximum number of nodes to keep in the list for finding nearest neighbors. A higher value can yield better recall, and a lower value can have better performance. If `efSearchConstruction` is set, this is only applied to indexing. The default is 100.
- `M` - The preferred number of connections at each layer in the HNSW graph. A higher number uses more space but can be helpful when the intrinsic dimensionality of the data is higher. A lower number can be more efficient. The default is 16.
- `optimizeRouting` - This uses a heuristic to avoid graph connections that match existing indirect connections (connections through another node). This can yield more efficient graph traversals for the same `M` setting. This is a number between 0 and 1, and a higher value will more aggressively omit connections with alternate paths. Setting this to 0 will disable route optimization and follow the traditional HNSW algorithm for creating connections. The default is 0.5.
- `mL` - The normalization factor for level generation; by default this is computed from `M`.
- `efSearchConstruction` - Maximum number of nodes to keep in the list for finding nearest neighbors when searching. The default is 50.

For example:

```graphql
type Product @table {
	id: Long @primaryKey
	textEmbeddings: [Float] @indexed(type: "HNSW", distance: "euclidean", optimizeRouting: 0, efSearchConstruction: 100)
}
```

#### `@createdTime`

The `@createdTime` directive indicates that this property should be assigned a timestamp of the creation time of the record (in epoch milliseconds).

#### `@updatedTime`

The `@updatedTime` directive indicates that this property should be assigned a timestamp of each update time of the record (in epoch milliseconds).

#### `@sealed`

The `@sealed` directive specifies that no additional properties should be allowed on records besides those specified in the type itself.
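Putting these field directives together, a sketch of a sealed table with automatically managed timestamps (the type and attribute names are illustrative; epoch-millisecond timestamps fit in a `Float`):

```graphql
type AuditEntry @table @sealed {
	id: ID @primaryKey
	action: String
	createdAt: Float @createdTime # set once, when the record is created
	updatedAt: Float @updatedTime # refreshed on every update
}
```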
### Defined vs Dynamic Schemas

If you do not define a schema for a table and create a table through the operations API (without specifying attributes) or the studio, that table will not have a defined schema and will follow the behavior of a ["dynamic-schema" table](../../technical-details/reference/dynamic-schema). It is generally best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity.

### Field Types

Harper supports the following field types in addition to user-defined (object) types:

- `String`: String/text
- `Int`: A 32-bit signed integer (from -2147483648 to 2147483647)
- `Long`: A 54-bit signed integer (from -9007199254740992 to 9007199254740992)
- `Float`: Any number (any number that can be represented as a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format); note that all numbers are stored in the most compact representation available)
- `BigInt`: Any integer (negative or positive) with less than 300 digits (note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately)
- `Boolean`: true or false
- `ID`: A string (but indicates it is not intended to be human readable)
- `Any`: Any primitive, object, or array is allowed
- `Date`: A Date object
- `Bytes`: Binary data as a Buffer or Uint8Array
- `Blob`: Binary data as a [Blob](../../technical-details/reference/blob), designed for large blocks of data that can be streamed. It is recommended that you use this for binary data that will typically be larger than 20KB.

#### Renaming Tables

It is important to note that Harper does not currently support renaming tables. If you change the name of a table in your schema definition, this will result in the creation of a new, empty table.

### OpenAPI Specification

_The_ [_OpenAPI Specification_](https://spec.openapis.org/oas/v3.1.0) _defines a standard, programming language-agnostic interface description for HTTP APIs, which allows both humans and computers to discover and understand the capabilities of a service without requiring access to source code, additional documentation, or inspection of network traffic._

If a set of endpoints is configured through a Harper GraphQL schema, those endpoints can be described using a default REST endpoint called `GET /openapi`.
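For example, issuing a plain GET request to this endpoint returns the generated description of the exported endpoints:

```http
GET /openapi HTTP/1.1
```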
_Note: The `/openapi` endpoint should only be used as a starting guide; it may not cover all the elements of an endpoint._
diff --git a/site/versioned_docs/version-4.6/developers/applications/example-projects.md b/site/versioned_docs/version-4.6/developers/applications/example-projects.md
new file mode 100644
index 00000000..51231c31
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/example-projects.md
@@ -0,0 +1,37 @@
---
title: Example Projects
---

# Example Projects

**Library of example Harper applications and components:**

- [Authorization in Harper using Okta Customer Identity Cloud](https://www.harperdb.io/post/authorization-in-harperdb-using-okta-customer-identity-cloud), by Yitaek Hwang

- [How to Speed Up your Applications by Caching at the Edge with Harper](https://dev.to/doabledanny/how-to-speed-up-your-applications-by-caching-at-the-edge-with-harperdb-3o2l), by Danny Adams

- [OAuth Authentication in Harper using Auth0 & Node.js](https://www.harperdb.io/post/oauth-authentication-in-harperdb-using-auth0-and-node-js), by Lucas Santos

- [How To Create a CRUD API with Next.js & Harper Custom Functions](https://www.harperdb.io/post/create-a-crud-api-w-next-js-harperdb), by Colby Fayock

- [Build a Dynamic REST API with Custom Functions](https://harperdb.io/blog/build-a-dynamic-rest-api-with-custom-functions/), by Terra Roush

- [How to use Harper Custom Functions to Build your Entire Backend](https://dev.to/andrewbaisden/how-to-use-harperdb-custom-functions-to-build-your-entire-backend-a2m), by Andrew Baisden

- [Using TensorFlowJS & Harper Custom Functions for Machine Learning](https://harperdb.io/blog/using-tensorflowjs-harperdb-for-machine-learning/), by Kevin Ashcraft

- [Build & Deploy a Fitness App with Python & Harper](https://www.youtube.com/watch?v=KMkmA4i2FQc), by Patrick Löber

- [Create a Discord Slash Bot using Harper Custom Functions](https://geekysrm.hashnode.dev/discord-slash-bot-with-harperdb-custom-functions), by Soumya Ranjan Mohanty

- [How I used Harper Custom Functions to Build a Web App for my Newsletter](https://blog.hrithwik.me/how-i-used-harperdb-custom-functions-to-build-a-web-app-for-my-newsletter), by Hrithwik Bharadwaj

- [How I used Harper Custom Functions and Recharts to create Dashboard](https://blog.greenroots.info/how-to-create-dashboard-with-harperdb-custom-functions-and-recharts), by Tapas Adhikary

- [How To Use Harper Custom Functions With Your React App](https://dev.to/tyaga001/how-to-use-harperdb-custom-functions-with-your-react-app-2c43), by Ankur Tyagi

- [Build a Web App Using Harper’s Custom Functions](https://www.youtube.com/watch?v=rz6prItVJZU), livestream by Jaxon Repp

- [How to Web Scrape Using Python, Snscrape & Custom Functions](https://hackernoon.com/how-to-web-scrape-using-python-snscrape-and-harperdb), by Davis David

- [What’s the Big Deal w/ Custom Functions](https://rss.com/podcasts/harperdb-select-star/278933/), Select\* Podcast
diff --git a/site/versioned_docs/version-4.6/developers/applications/index.md b/site/versioned_docs/version-4.6/developers/applications/index.md
new file mode 100644
index 00000000..388baf08
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/index.md
@@ -0,0 +1,237 @@
---
title: Applications
---

# Applications

Harper is more than a database; it's a distributed clustering platform that allows you to package your schema, endpoints, and application logic and deploy them to an entire fleet of Harper instances optimized for on-the-edge scalable data delivery.
In this guide, we are going to explore the ever-more extensible architecture that Harper provides by building a Harper application, a fundamental building block of the Harper ecosystem.

When working through this guide, we recommend you use the [Harper Application Template](https://github.com/HarperDB/application-template) repo as a reference.

Before we get started, let's clarify some terminology that is used throughout the documentation.

**Components** are the high-level concept for modules that extend the Harper core platform, adding additional functionality. The application you will build here is a component. In addition to applications, components also encompass extensions.

> We are actively working to disambiguate the terminology. When you see "component", such as in the Operations API or CLI, it generally refers to an application. We will do our best to clarify exactly which classification of component is meant whenever possible.

**Applications** are best defined as the implementation of a specific user-facing feature or functionality. Applications are built on top of extensions and can be thought of as the end product that users interact with. For example, a Next.js application that serves a web interface or an Apollo GraphQL server that provides a GraphQL API are both applications.

**Extensions** are the building blocks of the Harper component system. Applications depend on extensions to provide the functionality the application is implementing. For example, the built-in `graphqlSchema` extension enables applications to define their databases and tables using GraphQL schemas. Furthermore, the `@harperdb/nextjs` and `@harperdb/apollo` extensions are the building blocks that provide support for building Next.js and Apollo applications.

Altogether, the support for implementing a feature is the extension, and the actual implementation of the feature is the application.

Extensions can also depend on other extensions. For example, the [`@harperdb/apollo`](https://github.com/HarperDB/apollo) extension depends on the built-in `graphqlSchema` extension to create a cache table for Apollo queries. Applications can then use the `@harperdb/apollo` extension to implement an Apollo GraphQL backend server.

```mermaid
flowchart TD
    subgraph Applications
        direction TB
        NextJSApp["Next.js App"]
        ApolloApp["Apollo App"]
        CustomResource["Custom Resource"]
    end

    subgraph Extensions
        direction TB
        subgraph Custom
            NextjsExt["@harperdb/nextjs"]
            ApolloExt["@harperdb/apollo"]
        end
        subgraph Built-In
            GraphqlSchema["graphqlSchema"]
            JsResource["jsResource"]
            Rest["rest"]
        end
    end

    subgraph Core
        direction TB
        Database["database"]
        FileSystem["file-system"]
        Networking["networking"]
    end

    NextJSApp --> NextjsExt
    ApolloApp --> ApolloExt
    CustomResource --> JsResource & GraphqlSchema & Rest

    NextjsExt --> Networking
    NextjsExt --> FileSystem
    ApolloExt --> GraphqlSchema
    ApolloExt --> Networking

    GraphqlSchema --> Database
    JsResource --> Database
    Rest --> Networking
```

> As of Harper v4.6, a new, **experimental** component system has been introduced called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade.
> Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleApplication` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](../../technical-details/reference/components/plugins) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported.

Beyond applications and extensions, components are further classified as built-in or custom. **Built-in** components are included with Harper by default and can be directly referenced by their name. The `graphqlSchema`, `rest`, and `jsResource` extensions used in the previous application example are all examples of built-in extensions. **Custom** components must use external references, generally npm or GitHub packages, and are often included as dependencies within the `package.json` of the component.

> Harper maintains a number of custom components that are available on `npm` and `GitHub`, such as the [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs) extension or the [`@harperdb/status-check`](https://github.com/HarperDB/status-check) application.

Harper does not currently include any built-in applications, making "custom applications" a bit redundant; generally, we just say "application". However, there is a multitude of both built-in and custom extensions, and so the documentation refers to them as such. A complete list of built-in extensions is available in the [Built-In Extensions](../../technical-details/reference/components/built-in-extensions) documentation page, and the list of custom extensions and applications is available below.

This guide is going to walk you through building a basic Harper application using a set of built-in extensions.

> The Technical Details section of the documentation contains a [complete reference for all aspects of components](../../technical-details/reference/components), applications, extensions, and more.

## Custom Functionality with JavaScript

[The getting started guide](../../getting-started/first-harper-app) covers how to build an application entirely through schema configuration. However, if your application requires more custom functionality, you will probably want to employ your own JavaScript modules to implement more specific features and interactions. This gives you tremendous flexibility and control over how data is accessed and modified in Harper. Let's take a look at how we can use JavaScript to extend and define "resources" for custom functionality, by adding a property to dog records, when they are returned, that includes their age in human years. In Harper, data is accessed through our [Resource API](../../technical-details/reference/resources/), a standard interface for accessing data sources and tables and making them available to endpoints. Database tables are `Resource` classes, so extending the functionality of a table is as simple as extending its class.

To define custom (JavaScript) resources as endpoints, we need to create a `resources.js` module (this goes in the root of your application folder). Endpoints can then be defined with Resource classes that are `export`ed. This can be done in addition to, or in lieu of, the `@export`ed types in the schema.graphql.
If you are exporting and extending a table you defined in the schema, make sure you remove the `@export` from the schema so that you don't export the original table or resource to the same endpoint/path you are exporting with a class. Resource classes have methods that correspond to standard HTTP/REST methods, like `get`, `post`, `patch`, and `put`, to implement specific handling for any of these methods (for tables they all have default implementations). To do this, we get the `Dog` class from the defined tables, extend it, and export it:

```javascript
// resources.js:
const { Dog } = tables; // get the Dog table from the Harper provided set of tables (in the default database)

export class DogWithHumanAge extends Dog {
	static loadAsInstance = false;
	async get(target) {
		const record = await super.get(target);
		return {
			...record, // include all properties from the record
			humanAge: 15 + record.age * 5, // silly calculation of human age equivalent
		};
	}
}
```

Here we exported the `DogWithHumanAge` class (exported with the same name), which directly maps to the endpoint path. Therefore, we now have a `/DogWithHumanAge/` endpoint based on this class, just like the direct table interface that was exported as `/Dog/`, but the new endpoint will return objects with the computed `humanAge` property. Resource classes provide getters/setters for every defined attribute so that accessing instance properties like `age` will get the value from the underlying record. The instance holds information about the primary key of the record so updates and actions can be applied to the correct record, and changed or newly assigned properties can be saved or included in the resource as it is returned and serialized. The `super.get(target)` call allows any query parameters to be applied to the resource, such as selecting individual properties (with a [`select` query parameter](../rest#select-properties)).

Often we may want to incorporate data from other tables or data sources in our data models. Next, let's say that we want a `Breed` table that holds detailed information about each breed, and we want to add that information to the returned dog object. We might define the Breed table as (back in schema.graphql):

```graphql
type Breed @table {
	name: String @primaryKey
	description: String @indexed
	lifespan: Int
	averageWeight: Float
}
```

We use the new table's (static) `get()` method to retrieve a breed by id. Harper will maintain the current context, ensuring that we are accessing the data atomically, in a consistent snapshot across tables. This provides:

1. Automatic tracking of most recently updated timestamps across resources for caching purposes
1. Sharing of contextual metadata (like the user who requested the data)
1. Transactional atomicity for any writes (not needed in this get operation, but important for other operations)

The resource methods are automatically wrapped with a transaction and will automatically commit the changes when the method finishes. This allows us to fully utilize multiple resources in our current transaction.
With our own snapshot of the database for the Dog and Breed tables, we can then access data like this:
+
+```javascript
+// resources.js:
+const { Dog, Breed } = tables; // get the Breed table too
+export class DogWithBreed extends Dog {
+	static loadAsInstance = false;
+	async get(target) {
+		// get the Dog record
+		const record = await super.get(target);
+		// get the Breed record
+		let breedDescription = await Breed.get(record.breed);
+		return {
+			...record,
+			breedDescription,
+		};
+	}
+}
+```
+
+The call to `Breed.get` will return an instance of the `Breed` resource class, which holds the record specified by the provided id/primary key. Like the `Dog` instance, we can access or change properties on the Breed instance.
+
+Here we have focused on customizing how we retrieve data, but we may also want to define custom actions for writing data. While the HTTP PUT method has a specific semantic definition (replace the current record), the HTTP POST method has much more open-ended semantics and is a good choice for custom actions. POST requests are handled by our Resource's `post()` method. Let's say that we want to define a POST handler that adds a new trick to the `tricks` array of a specific record. We might do it like this, specifying an action so that different actions can be differentiated:
+
+```javascript
+export class CustomDog extends Dog {
+	static loadAsInstance = false;
+	async post(target, data) {
+		if (data.action === 'add-trick') {
+			const record = this.update(target);
+			record.tricks.push(data.trick);
+		}
+	}
+}
+```
+
+A POST request to `/CustomDog/` will call this `post` method. The Resource class then automatically tracks changes you make to your resource instances and saves those changes when this transaction is committed (again, these methods are automatically wrapped in a transaction and committed once the request handler is finished). So when you push data onto the `tricks` array, this will be recorded and persisted when this method finishes and before sending a response to the client.
+
+The `post` method automatically marks the current instance as being updated. However, you can also explicitly specify that you are changing a resource by calling the `update()` method. If you want to modify a resource instance that you retrieved through a `get()` call (like the `Breed.get()` call above), you can call its `update()` method to ensure changes are saved (and will be committed in the current transaction).
+
+We can also define custom authorization capabilities. For example, we might want to specify that only the owner of a dog can make updates to a dog. We could add logic to our `post()` method or `put()` method to do this. For example, we might do this:
+
+```javascript
+export class CustomDog extends Dog {
+	static loadAsInstance = false;
+	async post(target, data) {
+		if (data.action === 'add-trick') {
+			const context = this.getContext();
+			// if we want to skip the default permission checks, we can turn off checkPermissions:
+			target.checkPermissions = false;
+			const record = this.update(target);
+			// and do our own/custom permission check:
+			if (record.owner !== context.user?.username) {
+				throw new Error('Cannot update this record');
+			}
+			record.tricks.push(data.trick);
+		}
+	}
+}
+```
+
+Any methods that are not defined will fall back to Harper's default authorization procedure based on users' roles. If you are using/extending a table, this is based on Harper's [role based access](../security/users-and-roles).
If you are extending the base `Resource` class, the default access requires super user permission.
+
+You can also use the `default` export to define the root path resource handler. For example:
+
+```javascript
+// resources.js
+export default class CustomDog extends Dog {
+	...
+```
+
+This will allow requests to a URL like `/` to be directly resolved to this resource.
+
+## Define Custom Data Sources
+
+We can also directly implement the Resource class and use it to create new data sources from scratch that can be used as endpoints. Custom resources can also be used as caching sources. Let's say that we defined a `Breed` table that was a cache of information about breeds from another source. We could implement a caching table like:
+
+```javascript
+const { Breed } = tables; // our Breed table
+class BreedSource extends Resource {
+	// define a data source
+	async get(target) {
+		return (await fetch(`http://best-dog-site.com/${target}`)).json();
+	}
+}
+// define that our breed table is a cache of data from the data source above, with a specified expiration
+Breed.sourcedFrom(BreedSource, { expiration: 3600 });
+```
+
+The [caching documentation](./caching) provides much more information on how to use Harper's powerful caching capabilities and set up data sources.
+
+Harper provides a powerful JavaScript API with significant capabilities that go well beyond a "getting started" guide. See our documentation for more information on using the [`globals`](../../technical-details/reference/globals) and the [Resource interface](../../technical-details/reference/resources/).
+
+## Configuring Applications/Components
+
+For complete information on configuring applications, refer to the [Component Configuration](../../technical-details/reference/components/configuration) reference page.
+
+## Define Fastify Routes
+
+Exporting resources will generate full RESTful endpoints, but you may prefer to define endpoints through a framework. Harper includes a resource plugin for defining routes with the Fastify web framework. Fastify is a full-featured framework with many plugins that provides sophisticated route definition capabilities.
+
+By default, applications are configured to load any modules in the `routes` directory (matching `routes/*.js`) with Fastify's autoloader, which will allow these modules to export a function to define Fastify routes. See the [defining routes documentation](./define-routes) for more information on how to create Fastify routes.
+
+However, Fastify is not as fast as Harper's RESTful endpoints (about 10%-20% slower due to added overhead), nor does it automate the generation of a full uniform interface with correct RESTful header interactions (for caching control), so generally Harper's REST interface is recommended for optimal performance and ease of use.
+
+## Restarting Your Instance
+
+Generally, Harper will auto-detect when files change and auto-restart the appropriate threads.
However, if there are changes that aren't detected, you may manually restart with the `restart_service` operation:
+
+```json
+{
+  "operation": "restart_service",
+  "service": "http_workers"
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/applications/web-applications.md b/site/versioned_docs/version-4.6/developers/applications/web-applications.md
new file mode 100644
index 00000000..c49596b3
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/applications/web-applications.md
@@ -0,0 +1,63 @@
+---
+title: Web Applications on Harper
+---
+
+# Web Applications on Harper
+
+Harper is an efficient, capable, and robust platform for developing web applications, with numerous capabilities designed specifically for optimized web application delivery. In addition, there are a number of tools and frameworks that can be used with Harper to create web applications with standard best-practice design and development patterns. Running these frameworks on Harper can unlock tremendous scalability and performance benefits by leveraging Harper's built-in multi-threading, caching, and distributed design.
+
+Harper's unique ability to run JavaScript code directly on the server side, combined with its built-in database for data storage, querying, and caching, allows you to create full-featured web applications with a single platform. This eliminates the overhead of legacy solutions that require separate application servers, databases, and caching layers, and their requisite communication overhead and latency, while allowing the full stack to be deployed to distributed locations with full local response handling, providing an incredibly low-latency web experience.
+
+## Web Application Frameworks
+
+With built-in caching mechanisms and an easy-to-use JavaScript API for interacting with data, creating full-featured applications using popular frameworks is a simple and straightforward process.
+
+Get started today with one of our examples:
+
+- [Next.js](https://github.com/HarperDB/nextjs-example)
+- [React SSR](https://github.com/HarperDB/react-ssr-example)
+- [Vue SSR](https://github.com/HarperDB/vue-ssr-example)
+- [Svelte SSR](https://github.com/HarperDB/svelte-ssr-example)
+- [Solid SSR](https://github.com/HarperDB/solid-ssr-example)
+
+## Cookie Support
+
+Harper includes support for authenticated sessions using cookies. This allows you to create secure, authenticated web applications using best-practice security patterns, allowing users to log in and maintain a session without any credential storage on the client side that can be compromised. A login endpoint can be defined by exporting a resource and calling the `login` method on the request object. For example, this could be a login endpoint in your resources.js file:
+
+```javascript
+export class Login extends Resource {
+	async post(data) {
+		const { username, password } = data;
+		// call the login method on the request object to establish a cookie-based session
+		await request.login(username, password);
+		return { message: 'Logged in!' };
+	}
+}
+```
+
+This endpoint can be called from the client side using a standard fetch request; a cookie will be returned, and the session will be maintained by Harper. This allows web applications to directly interact with Harper and database resources, without needing to go through extra layers of authentication handling.
+
+## Browser Caching Negotiation
+
+Browsers support caching negotiation with revalidation, which allows requests for locally cached data to be sent to servers with a tag or timestamp.
Harper's REST functionality can fully interact with these headers, and return a `304 Not Modified` response based on the `ETag` previously sent in request headers. It is highly recommended that you utilize the [REST interface](../rest) for accessing tables, as it facilitates this downstream browser caching. Timestamps are recorded with all records and are then returned [as the `ETag` in the response](../rest#cachingconditional-requests). Utilizing this browser caching can greatly reduce the load on your server and improve the performance of your web application by being able to instantly use locally cached data after revalidation from the server.
+
+## Built-in Cross-Origin Resource Sharing (CORS)
+
+Harper includes built-in support for Cross-Origin Resource Sharing (CORS), which allows you to define which domains are allowed to access your Harper instance. This is a critical security feature for web applications, as it prevents unauthorized access to your data from other domains, while allowing cross-domain access from known hosts. You can define the allowed domains in your [Harper configuration file](../../deployments/configuration#http), and Harper will automatically handle the CORS headers for you.
+
+## More Resources
+
+Make sure to check out our developer videos too:
+
+- [Next.js on Harper | Step-by-Step Guide for Next Level Next.js Performance](https://youtu.be/GqLEwteFJYY)
+- [Server-side Rendering (SSR) with Multi-Tier Cache Demo](https://youtu.be/L-tnBNhO9Fc)
diff --git a/site/versioned_docs/version-4.6/developers/clustering/certificate-management.md b/site/versioned_docs/version-4.6/developers/clustering/certificate-management.md
new file mode 100644
index 00000000..43839a4b
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/certificate-management.md
@@ -0,0 +1,70 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+## Development
+
+Out of the box, Harper generates certificates that are used when Harper nodes are clustered together to securely share data between nodes. These certificates are meant for testing and development purposes. Because these certificates do not have Common Names (CNs) that will match the Fully Qualified Domain Name (FQDN) of the Harper node, the following settings (see the full [configuration file](../../deployments/configuration) docs for more details) are defaulted & recommended for ease of development:
+
+```
+clustering:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    certificateAuthority: ~/hdb/keys/ca.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+    insecure: true
+    verify: true
+```
+
+The certificates that Harper generates are stored in the `keys` directory under your Harper root (for example, `~/hdb/keys/` as shown above).
+
+`insecure` is set to `true` to accept the certificate CN mismatch due to development certificates.
+
+`verify` is set to `true` to enable mutual TLS between the nodes.
+
+## Production
+
+In a production environment, we recommend using your own certificate authority (CA), or a public CA such as Let's Encrypt, to generate certs for your Harper cluster. This will let you generate certificates with CNs that match the FQDN of your nodes.
+
+Once you generate new certificates, to make Harper start using them you can either replace the generated files with your own, or update the configuration to point to your new certificates, and then restart Harper.
+
+Since these new certificates can be issued with correct CNs, you should set `insecure` to `false` so that nodes will do full validation of the certificates of the other nodes.
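+
+For example, assuming certificates issued for each node's FQDN are placed on disk (the file paths below are hypothetical), a production configuration might look like this:
+
+```
+clustering:
+  tls:
+    certificate: /etc/harper/keys/node1.example.com.pem
+    certificateAuthority: /etc/harper/keys/ca-chain.pem
+    privateKey: /etc/harper/keys/node1.example.com.key
+    insecure: false
+    verify: true
+```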
+
+### Certificate Requirements
+
+- Certificates must have an `Extended Key Usage` that defines both `TLS Web Server Authentication` and `TLS Web Client Authentication`, as these certificates will be used to accept connections from other Harper nodes and to make requests to other Harper nodes. Example:
+
+```
+X509v3 Key Usage: critical
+    Digital Signature, Key Encipherment
+X509v3 Extended Key Usage:
+    TLS Web Server Authentication, TLS Web Client Authentication
+```
+
+- If you are using an intermediate CA to issue the certificates, the entire certificate chain (to the root CA) must be included in the `certificateAuthority` file.
+- If your certificates expire, you will need a way to issue new certificates to the nodes and then restart Harper. If you are using a public CA such as Let's Encrypt, a tool like `certbot` can be used to renew certificates.
+
+### Certificate Troubleshooting
+
+If you are having TLS issues with clustering, use the following steps to verify that your certificates are valid.
+
+1. Make sure certificates can be parsed and that you can view the contents:
+
+```
+openssl x509 -in <certificate>.pem -noout -text
+```
+
+1. Make sure the certificate validates with the CA:
+
+```
+openssl verify -CAfile <certificateAuthority>.pem <certificate>.pem
+```
+
+1. Make sure the certificate and private key are a valid pair by verifying that the output of the following commands match:
+
+```
+openssl rsa -modulus -noout -in <privateKey>.pem | openssl md5
+openssl x509 -modulus -noout -in <certificate>.pem | openssl md5
+```
diff --git a/site/versioned_docs/version-4.6/developers/clustering/creating-a-cluster-user.md b/site/versioned_docs/version-4.6/developers/clustering/creating-a-cluster-user.md
new file mode 100644
index 00000000..0a8b2a6c
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/creating-a-cluster-user.md
@@ -0,0 +1,59 @@
+---
+title: Creating a Cluster User
+---
+
+# Creating a Cluster User
+
+Inter-node authentication takes place via Harper users. There is a special role type called `cluster_user` that exists by default and limits the user to only clustering functionality.
+
+A `cluster_user` must be created and added to the `harperdb-config.yaml` file for clustering to be enabled.
+
+All nodes that are intended to be clustered together need to share the same `cluster_user` credentials (i.e. username and password).
+
+There are multiple ways a `cluster_user` can be created:
+
+1. Through the operations API by calling `add_user`
+
+```json
+{
+  "operation": "add_user",
+  "role": "cluster_user",
+  "username": "cluster_account",
+  "password": "letsCluster123!",
+  "active": true
+}
+```
+
+When using the API to create a cluster user, the `harperdb-config.yaml` file must be updated with the username of the new cluster user.
+
+This can be done through the API by calling `set_configuration` or by editing the `harperdb-config.yaml` file.
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_user": "cluster_account"
+}
+```
+
+In the `harperdb-config.yaml` file, under the top-level `clustering` element, there will be a `user` element. Set this to the name of the cluster user.
+
+```yaml
+clustering:
+  user: cluster_account
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Upon installation using **command line variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+_Note: Using command line or environment variables for setting the cluster user only works on install._
+
+```
+harperdb install --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
+
+1. Upon installation using **environment variables**. This will automatically set the user in the `harperdb-config.yaml` file.
+
+```
+CLUSTERING_USER=cluster_account CLUSTERING_PASSWORD=letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.6/developers/clustering/enabling-clustering.md b/site/versioned_docs/version-4.6/developers/clustering/enabling-clustering.md
new file mode 100644
index 00000000..606bc29c
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/enabling-clustering.md
@@ -0,0 +1,49 @@
+---
+title: Enabling Clustering
+---
+
+# Enabling Clustering
+
+Clustering does not run by default; it needs to be enabled.
+
+To enable clustering, the `clustering.enabled` configuration element in the `harperdb-config.yaml` file must be set to `true`.
+
+There are multiple ways to update this element:
+
+1. Directly editing the `harperdb-config.yaml` file and setting `enabled` to `true`
+
+```yaml
+clustering:
+  enabled: true
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Calling `set_configuration` through the operations API
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_enabled": true
+}
+```
+
+_Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._
+
+1. Using **command line variables**.
+
+```
+harperdb --CLUSTERING_ENABLED true
+```
+
+1. Using **environment variables**.
+
+```
+CLUSTERING_ENABLED=true
+```
+
+An efficient way to **install Harper**, **create the cluster user**, **set the node name** and **enable clustering** in one operation is to combine the steps using command line and/or environment variables. Here is an example using command line variables.
+
+```
+harperdb install --CLUSTERING_ENABLED true --CLUSTERING_NODENAME Node1 --CLUSTERING_USER cluster_account --CLUSTERING_PASSWORD letsCluster123!
+```
diff --git a/site/versioned_docs/version-4.6/developers/clustering/establishing-routes.md b/site/versioned_docs/version-4.6/developers/clustering/establishing-routes.md
new file mode 100644
index 00000000..1d4d5ae2
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/establishing-routes.md
@@ -0,0 +1,73 @@
+---
+title: Establishing Routes
+---
+
+# Establishing Routes
+
+A route is a connection between two nodes. It is how the clustering network is established.
+
+Routes do not need to cross-connect all nodes in the cluster. You can select a leader node (or a few leaders) that all other nodes connect to, you can chain nodes together, and so on. As long as there is one route connecting a node to the cluster, all other nodes should be able to reach that node.
+
+Using routes, the clustering servers will create a mesh network between nodes. This mesh network ensures that if a node drops out, all other nodes can still communicate with each other. That being said, we recommend designing your routing with failover in mind; this means not storing all your routes on one node but dispersing them throughout the network.
+
+A simple route example is a two-node topology: if Node1 adds a route to connect it to Node2, Node2 does not need to add a route to Node1. That one route configuration is all that’s needed to establish a bidirectional connection between the nodes.
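+
+As a minimal sketch of that two-node example, only Node1's `harperdb-config.yaml` needs a route entry (the host and port values here are placeholders for Node2's address and clustering port):
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      network:
+        routes:
+          - host: node2.example.com
+            port: 9932
+```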
+
+A route consists of a `port` and a `host`.
+
+`port` - the clustering port of the remote instance you are creating the connection with. This is going to be the `clustering.hubServer.cluster.network.port` in the Harper configuration on the node you are connecting with.
+
+`host` - the host of the remote instance you are creating the connection with. This can be an IP address or a URL.
+
+Routes are set in the `harperdb-config.yaml` file using the `clustering.hubServer.cluster.network.routes` element, which expects an object array, where each object has two properties, `port` and `host`.
+
+```yaml
+clustering:
+  hubServer:
+    cluster:
+      network:
+        routes:
+          - host: 3.62.184.22
+            port: 9932
+          - host: 3.735.184.8
+            port: 9932
+```
+
+![figure 1](/img/v4.6/clustering/figure1.png)
+
+This diagram shows one way of using routes to connect a network of nodes. Node2 and Node3 do not reference any routes in their config. Node1 contains routes for Node2 and Node3, which is enough to establish a network between all three nodes.
+
+There are multiple ways to set routes:
+
+1. Directly editing the `harperdb-config.yaml` file (refer to code snippet above).
+1. Calling `cluster_set_routes` through the API.
+
+```json
+{
+  "operation": "cluster_set_routes",
+  "server": "hub",
+  "routes": [{ "host": "3.735.184.8", "port": 9932 }]
+}
+```
+
+_Note: When making any changes to Harper configuration, Harper must be restarted for the changes to take effect._
+
+1. From the command line.
+
+```bash
+--CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES "[{\"host\": \"3.735.184.8\", \"port\": 9932}]"
+```
+
+1. Using environment variables.
+
+```bash
+CLUSTERING_HUBSERVER_CLUSTER_NETWORK_ROUTES=[{"host": "3.735.184.8", "port": 9932}]
+```
+
+The API also has `cluster_get_routes` for getting all routes in the config and `cluster_delete_routes` for deleting routes.
+
+```json
+{
+  "operation": "cluster_delete_routes",
+  "routes": [{ "host": "3.735.184.8", "port": 9932 }]
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/clustering/index.md b/site/versioned_docs/version-4.6/developers/clustering/index.md
new file mode 100644
index 00000000..95c3433c
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/index.md
@@ -0,0 +1,31 @@
+---
+title: NATS Clustering
+---
+
+# NATS Clustering
+
+Harper 4.0 - 4.3 used a clustering system based on NATS for replication. In 4.4+, Harper has moved to a new native replication system that has better performance, reliability, and data consistency. This document describes the legacy NATS clustering system. Harper clustering is the process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns.
+
+Harper’s clustering engine replicates data between instances of Harper using a highly performant, bi-directional pub/sub model on a per-table basis. Data replicates asynchronously with eventual consistency across the cluster following the defined pub/sub configuration. Individual transactions are sent in the order in which they were transacted; once received by the destination instance, they are processed in an ACID-compliant manner. Conflict resolution follows a last-writer-wins model, comparing the recorded transaction time of the incoming transaction with the timestamp on the existing record on the receiving node.
+
+---
+
+### Common Use Case
+
+A common use case is an edge application collecting and analyzing sensor data that creates an alert if a sensor value exceeds a given threshold:
+
+- The edge application should not be making outbound HTTP requests for security purposes.
+- There may not be a reliable network connection.
+- Not all sensor data will be sent to the cloud--either because of the unreliable network connection, or maybe it’s just a pain to store it.
+- The edge node should be inaccessible from outside the firewall.
+- The edge node will send alerts to the cloud with a snippet of sensor data containing the offending sensor readings.
+
+Harper simplifies the architecture of such an application with its bi-directional, table-level replication:
+
+- The edge instance subscribes to a “thresholds” table on the cloud instance, so the application only makes localhost calls to get the thresholds.
+- The application continually pushes sensor data into a “sensor_data” table via the localhost API, comparing it to the threshold values as it does so.
+- When a threshold violation occurs, the application adds a record to the “alerts” table.
+- The application appends to that record array “sensor_data” entries for the 60 seconds (or minutes, or days) leading up to the threshold violation.
+- The edge instance publishes the “alerts” table up to the cloud instance.
+
+By letting Harper focus on the fault-tolerant logistics of transporting your data, you get to write less code. By moving data only when and where it’s needed, you lower storage and bandwidth costs. And by restricting your app to only making local calls to Harper, you reduce the overall exposure of your application to outside forces.
diff --git a/site/versioned_docs/version-4.6/developers/clustering/managing-subscriptions.md b/site/versioned_docs/version-4.6/developers/clustering/managing-subscriptions.md
new file mode 100644
index 00000000..f043c9d1
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/managing-subscriptions.md
@@ -0,0 +1,199 @@
+---
+title: Managing subscriptions
+---
+
+Tables are replicated when the table is designated as replicating and there is a subscription between the nodes.
+Tables are designated as replicating by default, but this can be changed by setting `replicate` to `false` in the table definition:
+
+```graphql
+type Product @table(replicate: false) {
+	id: ID!
+	name: String!
+}
+```
+
+Or in your harperdb-config.yaml, you can set the default replication behavior for databases, and indicate which databases should be replicated by default:
+
+```yaml
+replication:
+  databases: data
+```
+
+If a table is not in the list of databases to be replicated, it will not be replicated unless the table is specifically set to replicate:
+
+```graphql
+type Product @table(replicate: true) {
+	id: ID!
+	name: String!
+}
+```
+
+The subscription can be set to publish, subscribe, or both.
+
+# Managing subscriptions
+
+Subscriptions can be added, updated, or removed through the API.
+
+_Note: The databases and tables in the subscription must exist on either the local or the remote node. Any databases or tables that do not exist on one particular node, for example, the local node, will be automatically created on the local node._
+
+To add a single node and create one or more subscriptions, use `set_node_replication`.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "database": "data",
+      "table": "dog",
+      "publish": false,
+      "subscribe": true
+    },
+    {
+      "database": "data",
+      "table": "chicken",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+This is an example of adding Node2 to your local node. Subscriptions are created for two tables, dog and chicken.
+
+To update one or more subscriptions with a single node, you can also use `set_node_replication`; however, this will behave as a PATCH/upsert, where only the subscription(s) changing will be inserted/updated while the others will be left untouched.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "schema": "dev",
+      "table": "dog",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+This call will update the subscription with the dog table. Any other subscriptions with Node2 will not change.
+
+To add or update subscriptions with one or more nodes in one API call, use `configure_cluster`.
+
+```json
+{
+  "operation": "configure_cluster",
+  "connections": [
+    {
+      "node_name": "Node2",
+      "subscriptions": [
+        {
+          "database": "dev",
+          "table": "chicken",
+          "publish": false,
+          "subscribe": true
+        },
+        {
+          "database": "prod",
+          "table": "dog",
+          "publish": true,
+          "subscribe": true
+        }
+      ]
+    },
+    {
+      "node_name": "Node3",
+      "subscriptions": [
+        {
+          "database": "dev",
+          "table": "chicken",
+          "publish": true,
+          "subscribe": false
+        }
+      ]
+    }
+  ]
+}
```
+
+_Note: `configure_cluster` will override **any and all** existing subscriptions defined on the local node. This means that before going through the connections in the request and adding the subscriptions, it will first go through **all existing subscriptions the local node has** and remove them. To get all existing subscriptions use `cluster_status`._
+
+#### Start time
+
+There is an optional property called `start_time` that can be passed in the subscription. This property accepts an ISO formatted UTC date.
+
+`start_time` can be used to set from what time you would like to source transactions from a table when creating or updating a subscription.
+
+```json
+{
+  "operation": "set_node_replication",
+  "node_name": "Node2",
+  "subscriptions": [
+    {
+      "database": "dev",
+      "table": "dog",
+      "publish": false,
+      "subscribe": true,
+      "start_time": "2022-09-02T20:06:35.993Z"
+    }
+  ]
+}
+```
+
+This example will get all transactions on Node2’s dog table starting from `2022-09-02T20:06:35.993Z` and replicate them locally on the dog table.
+
+If no start time is passed, it defaults to the current time.
+
+_Note: start time utilizes clustering to back-source transactions. For this reason it can only source transactions that occurred when clustering was enabled._
+
+#### Remove node
+
+To remove a node and all its subscriptions, use `remove_node`.
+
+```json
+{
+  "operation": "remove_node",
+  "node_name": "Node2"
+}
+```
+
+#### Cluster status
+
+To get the status of all connected nodes and see their subscriptions, use `cluster_status`.
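+
+The request takes no parameters beyond the operation name:
+
+```json
+{
+  "operation": "cluster_status"
+}
+```
+
+The response will look something like this: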
+
+```json
+{
+  "node_name": "Node1",
+  "is_enabled": true,
+  "connections": [
+    {
+      "node_name": "Node2",
+      "status": "open",
+      "ports": {
+        "clustering": 9932,
+        "operations_api": 9925
+      },
+      "latency_ms": 65,
+      "uptime": "11m 19s",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "publish": true,
+          "subscribe": true
+        }
+      ],
+      "system_info": {
+        "hdb_version": "4.0.0",
+        "node_version": "16.17.1",
+        "platform": "linux"
+      }
+    }
+  ]
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/clustering/naming-a-node.md b/site/versioned_docs/version-4.6/developers/clustering/naming-a-node.md
new file mode 100644
index 00000000..7a512efb
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/naming-a-node.md
@@ -0,0 +1,45 @@
+---
+title: Naming a Node
+---
+
+# Naming a Node
+
+Node name is the name given to a node. It is how nodes are identified within the cluster and must be unique to the cluster.
+
+The name cannot contain any of the following: dot (`.`), comma (`,`), asterisk (`*`), greater-than (`>`), or whitespace.
+
+The name is set in the `harperdb-config.yaml` file using the `clustering.nodeName` configuration element.
+
+_Note: If you want to change the node name, make sure there are no subscriptions in place before doing so. After the name has been changed, a full restart is required._
+
+There are multiple ways to update this element:
+
+1. Directly editing the `harperdb-config.yaml` file.
+
+```yaml
+clustering:
+  nodeName: Node1
+```
+
+_Note: When making any changes to the `harperdb-config.yaml` file, Harper must be restarted for the changes to take effect._
+
+1. Calling `set_configuration` through the operations API
+
+```json
+{
+  "operation": "set_configuration",
+  "clustering_nodeName": "Node1"
+}
+```
+
+1. Using command line variables.
+
+```
+harperdb --CLUSTERING_NODENAME Node1
+```
+
+1. Using environment variables.
+
+```
+CLUSTERING_NODENAME=Node1
+```
diff --git a/site/versioned_docs/version-4.6/developers/clustering/requirements-and-definitions.md b/site/versioned_docs/version-4.6/developers/clustering/requirements-and-definitions.md
new file mode 100644
index 00000000..22bc3977
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/requirements-and-definitions.md
@@ -0,0 +1,11 @@
+---
+title: Requirements and Definitions
+---
+
+# Requirements and Definitions
+
+To create a cluster, you must have two or more nodes\* (aka instances) of Harper running.
+
+\*_A node is a single instance/installation of Harper. A node of Harper can operate independently with clustering on or off._
+
+On the following pages we'll walk you through the steps required, in order, to set up a Harper cluster.
diff --git a/site/versioned_docs/version-4.6/developers/clustering/subscription-overview.md b/site/versioned_docs/version-4.6/developers/clustering/subscription-overview.md
new file mode 100644
index 00000000..b4827de7
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/subscription-overview.md
@@ -0,0 +1,45 @@
+---
+title: Subscription Overview
+---
+
+# Subscription Overview
+
+A subscription defines how data should move between two nodes. Subscriptions are exclusively table-level and operate independently. They connect a table on one node to a table on another node; the subscription will apply to the matching database name and table name on both nodes.
+
+_Note: ‘local’ and ‘remote’ will often be referred to.
In the context of these docs, ‘local’ is the node receiving the API request to create/update a subscription, and ‘remote’ is the node on the other end of the subscription, i.e. the other node referred to in the request._
+
+A subscription consists of:
+
+`database` - the name of the database that the table you are creating the subscription for belongs to. _Note: this was previously referred to as schema and may occasionally still be referenced that way._
+
+`table` - the name of the table the subscription will apply to.
+
+`publish` - a boolean which determines if transactions on the local table should be replicated on the remote table.
+
+`subscribe` - a boolean which determines if transactions on the remote table should be replicated on the local table.
+
+#### Publish subscription
+
+![figure 2](/img/v4.6/clustering/figure2.png)
+
+This diagram is an example of a `publish` subscription from the perspective of Node1.
+
+The record with id 2 has been inserted in the dog table on Node1; after that insert completes, it is sent to Node2 and inserted in the dog table there.
+
+#### Subscribe subscription
+
+![figure 3](/img/v4.6/clustering/figure3.png)
+
+This diagram is an example of a `subscribe` subscription from the perspective of Node1.
+
+The record with id 3 has been inserted in the dog table on Node2; after that insert completes, it is sent to Node1 and inserted there.
+
+#### Subscribe and Publish
+
+![figure 4](/img/v4.6/clustering/figure4.png)
+
+This diagram shows both subscribe and publish, with publish set to false. You can see that because subscribe is true, the insert on Node2 is replicated on Node1, but because publish is set to false, the insert on Node1 is _**not**_ replicated on Node2.
+
+![figure 5](/img/v4.6/clustering/figure5.png)
+
+This shows both subscribe and publish set to true. The insert on Node1 is replicated on Node2 and the update on Node2 is replicated on Node1.
diff --git a/site/versioned_docs/version-4.6/developers/clustering/things-worth-knowing.md b/site/versioned_docs/version-4.6/developers/clustering/things-worth-knowing.md
new file mode 100644
index 00000000..f523c7bf
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/clustering/things-worth-knowing.md
@@ -0,0 +1,43 @@
+---
+title: Things Worth Knowing
+---
+
+# Things Worth Knowing
+
+Additional information that will help you define your clustering topology.
+
+---
+
+### Transactions
+
+Transactions that are replicated across the cluster are:
+
+- Insert
+- Update
+- Upsert
+- Delete
+- Bulk loads
+  - CSV data load
+  - CSV file load
+  - CSV URL load
+  - Import from S3
+
+When adding or updating a node, any databases and tables in the subscription that don’t exist on the remote node will be automatically created.
+
+**Destructive database operations do not replicate across a cluster**. Those operations include `drop_database`, `drop_table`, and `drop_attribute`. If the desired outcome is to drop database information from any nodes, then the operation(s) will need to be run on each node independently.
+
+Users and roles are not replicated across the cluster.
+
+---
+
+### Queueing
+
+Harper has built-in resiliency for when network connectivity is lost within a subscription. When connections are reestablished, a catchup routine is executed to ensure data that was missed, specific to the subscription, is sent/received as defined.
+
+---
+
+### Topologies
+
+Harper clustering creates a mesh network between nodes, giving end users the ability to create an infinite number of topologies. Subscription topologies can be as simple or as complex as needed.
+
+![](/img/v4.6/clustering/figure6.png)
diff --git a/site/versioned_docs/version-4.6/developers/miscellaneous/google-data-studio.md b/site/versioned_docs/version-4.6/developers/miscellaneous/google-data-studio.md
new file mode 100644
index 00000000..b29af70e
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/miscellaneous/google-data-studio.md
@@ -0,0 +1,37 @@
+---
+title: Google Data Studio
+---
+
+# Google Data Studio
+
+[Google Data Studio](https://datastudio.google.com/) is a free collaborative visualization tool which enables users to build configurable charts and tables quickly. The Harper Google Data Studio connector seamlessly integrates your Harper data with Google Data Studio so you can build custom, real-time data visualizations.
+
+The Harper Google Data Studio Connector is subject to our [Terms of Use](https://harperdb.io/legal/harperdb-cloud-terms-of-service/) and [Privacy Policy](https://harperdb.io/legal/privacy-policy/).
+
+## Requirements
+
+The Harper database must be accessible through the Internet in order for Google Data Studio servers to access it. The database may be hosted by you or via [Harper Cloud](../../deployments/harper-cloud/).
+
+## Get Started
+
+Get started by selecting the Harper connector from the [Google Data Studio Partner Connector Gallery](https://datastudio.google.com/u/0/datasources/create).
+
+1. Log in to https://datastudio.google.com/.
+1. Add a new Data Source using the Harper connector. The current release version can be added as a data source by following this link: [Harper Google Data Studio Connector](https://datastudio.google.com/datasources/create?connectorId=AKfycbxBKgF8FI5R42WVxO-QCOq7dmUys0HJrUJMkBQRoGnCasY60_VJeO3BhHJPvdd20-S76g).
+1. Authorize the connector to access other servers on your behalf (this allows the connector to contact your database).
+1. Enter the Web URL to access your database (preferably with HTTPS), as well as the Basic Auth key you use to access the database. Just include the key, not the word “Basic” at the start of it.
+1. Check the box for “Secure Connections Only” if you want to always use HTTPS connections for this data source; entering a Web URL that starts with https:// will do the same thing, if you prefer.
+1. Check the box for “Allow Bad Certs” if your Harper instance does not have a valid SSL certificate. [Harper Cloud](../../deployments/harper-cloud/) always has valid certificates, and so will never require this to be checked. Instances you set up yourself may require this, if you are using self-signed certs. If you are using [Harper Cloud](../../deployments/harper-cloud/) or another instance you know should always have valid SSL certificates, do not check this box.
+1. Choose your Query Type. This determines what information the configuration will ask for after pressing the Next button.
+   - Table will ask you for a Schema and a Table to return all fields of using `SELECT *`.
+   - SQL will ask you for the SQL query you’re using to retrieve fields from the database. You may `JOIN` multiple tables together, and use Harper-specific SQL functions, along with the usual power that SQL grants.
+1. When all information is entered correctly, press the Connect button in the top right of the new Data Source view to generate the Schema. You may also want to name the data source at this point.
If the connector encounters any errors, a dialog box will tell you what went wrong so you can correct the issue.
+1. If there are no errors, you now have a data source you can use in your reports! You may change the types of the generated fields in the Schema view if you need to (for instance, changing a Number field to a specific currency), as well as creating new fields from the report view that do calculations on other fields.
+
+## Considerations
+
+- Both Postman and the [Harper Studio](../../administration/harper-studio/) app have ways to convert a user:password pair to a Basic Auth token. Use either to create the token for the connector's user.
+  - You may sign out of your current user by going to the instances tab in Harper Studio, then clicking on the lock icon at the top-right of a given instance’s box. Click the lock again to sign in as any user. The Basic Auth token will be visible in the Authorization header portion of any code created in the Sample Code tab.
+- It’s highly recommended that you create a read-only user role in Harper Studio, and create a user with that role for your data sources to use. This prevents that authorization token from being used to alter your database, should someone else ever get ahold of it.
+- The RecordCount field is intended for use as a metric, for counting how many instances of a given set of values appear in a report’s data set.
+- _Do not attempt to create fields with spaces in their names_ for any data sources! Google Data Studio will crash when attempting to retrieve a field with such a name, producing a System Error instead of a useful chart on your reports. Using CamelCase or snake_case gets around this.
diff --git a/site/versioned_docs/version-4.6/developers/miscellaneous/index.md b/site/versioned_docs/version-4.6/developers/miscellaneous/index.md
new file mode 100644
index 00000000..f80dc499
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/miscellaneous/index.md
@@ -0,0 +1,7 @@
+---
+title: Miscellaneous
+---
+
+# Miscellaneous
+
+This section covers a grouping of reference documents for various external developer tools, packages, SDKs, etc.
diff --git a/site/versioned_docs/version-4.6/developers/miscellaneous/query-optimization.md b/site/versioned_docs/version-4.6/developers/miscellaneous/query-optimization.md
new file mode 100644
index 00000000..139b862b
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/miscellaneous/query-optimization.md
@@ -0,0 +1,37 @@
+---
+title: Query Optimization
+---
+
+## Query Optimization
+
+Harper has powerful query functionality with excellent performance characteristics. However, like any database, different queries can vary significantly in performance. It is important to understand how querying works to help you optimize your queries for the best performance.
+
+### Query Execution
+
+At a fundamental level, querying involves defining conditions to find matching data and then executing those conditions against the database and delivering the results based on required fields, relationships, and ordering. Harper supports indexed fields, and these indexes are used to speed up query execution. When conditions are specified in a query, Harper will attempt to utilize indexes to optimize the speed of query execution. When a field is not indexed and a query specifies a condition on that field, the database must check each potential record to determine whether it matches the condition.
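+
+For reference, indexes are declared in the table schema. In this hypothetical schema, conditions on `name` can use its index, while conditions on `color` require checking records:
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+	name: String @indexed
+	color: String
+}
+```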
+
+When a query is performed with multiple conditions, Harper will attempt to optimize the ordering of these conditions. When using intersecting conditions (the default `and` operator, where matching records must match all conditions), Harper will attempt to apply the most selective and performant condition first. This means that if one condition can use an index and is more selective than another, it will be used first to find the initial matching set of data, and the remaining conditions will then be applied as filters. A condition that can search an indexed field with good selectivity will be used before conditions that aren't indexed or are less selective. The `search` method includes an `explain` flag that can be used to return the query execution order to understand how the query is being executed. This can be useful for debugging and optimizing queries.
+
+For a union query, each condition is executed separately and the results are combined/merged.
+
+### Conditions, Operators, and Indexing
+
+When a query is performed, the conditions specified in the query are evaluated against the data in the database. The conditions can be simple or complex, and can include scalar operators such as `=`, `!=`, `>`, `<`, `>=`, `<=`, as well as `starts_with`, `contains`, and `ends_with`. The use of these operators can affect the performance of the query, especially when used with indexed fields. If an indexed field is not used, the database will have to check each potential record to determine if it matches the condition. If the only condition is not indexed, or there are no conditions with an indexed field, the database will have to check every record with a full table scan, which can be very slow for large datasets (and will get slower as the dataset grows, `O(n)`).
+
+The use of indexed fields can significantly improve the performance of a query, providing fast performance even as the database grows in size (`O(log n)`). However, indexed fields require extra writes to the database when performing insert, update, or delete operations. This is because the index must be updated to reflect the changes in the data. This can slow down write operations, but the trade-off is often worth it if the field is frequently used in queries.
+
+The different operators can also affect the performance of a query. For example, using the `=` operator on an indexed field is generally faster than using the `!=` operator, as the latter requires checking all records that do not match the condition. An index is a sorted list of values, so the greater-than and less-than operators will also utilize indexed fields when possible. If the range is narrow, these operations can be very fast. A wide range could yield a large number of records and will naturally incur more overhead. The `starts_with` operator can also leverage indexed fields because it can quickly find the matching entries in the sorted index. On the other hand, the `contains`, `ends_with`, and not-equal (`!=` or `not_equal`) operators cannot leverage the indexes, so they will require a full table scan to find the matching records if they are not used in conjunction with a selective/indexed condition. There is a special case of `!= null`, which can use indexes to find non-null records; however, this is generally only helpful for sparse fields where only a small subset of records have non-null values. More generally, operators are more efficient if they are selecting on fields with high cardinality (many unique values).
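+
+To see how Harper plans a particular query, you can pass the `explain` flag to `search`. The sketch below is illustrative only: the `DogQueryPlan` resource and the `color` attribute are hypothetical, and the condition shape (`attribute`, `value`, optional `comparator`) follows the Resource API reference:
+
+```javascript
+// resources.js: a sketch of inspecting query execution order
+const { Dog } = tables;
+
+export class DogQueryPlan extends Resource {
+	async get() {
+		// with explain enabled, search returns the planned condition
+		// execution order rather than the matching records
+		return Dog.search({
+			conditions: [
+				{ attribute: 'color', value: 'brown' }, // not indexed: applied as a filter
+				{ attribute: 'name', value: 'Harper' }, // indexed: should be applied first
+			],
+			explain: true,
+		});
+	}
+}
+```
+
+Removing the `explain` flag runs the same query normally; comparing the two is a quick way to verify that a selective, indexed condition is being applied first.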
+
+Conditions can be applied to primary key fields or other indexed fields (known as secondary indexes). In general, querying on a primary key will be faster than querying on a secondary index, as the primary key is the most efficient way to access data in the database, and doesn't require cross-referencing to the main records.
+
+### Relationships/Joins
+
+Harper supports relationships between tables, allowing for "join" queries. This does result in more complex queries with potentially larger performance overhead, as more lookups are necessary to connect matched or selected data with other tables. Similar principles apply to conditions which use relationships: indexed fields and comparators that leverage the ordering are still valuable for performance. It is also important that if a condition on a table is connected through another table's foreign key, that foreign key is also indexed. Likewise, if a query `select`s data from a related table through a foreign key, that key should be indexed. The same principle of higher cardinality applies here as well: more unique values allow for more efficient lookups.
+
+### Sorting
+
+Queries can also specify a sort order, which can also significantly impact performance. If a query specifies a sort order on an indexed field, the database can use the index to quickly retrieve the data in the specified order. A sort order used in conjunction with a condition on the same (indexed) field can utilize the index for ordering. However, if the sort order is not on an indexed field, or the query specifies conditions on different fields, Harper will generally need to sort the data after retrieving it, which can be slow for large datasets. The same principles apply to sorting as they do to conditions: sorting on a primary key is generally faster than sorting on a secondary index, if the condition aligns with the sort order.
+
+### Streaming
+
+One of the unique and powerful features of Harper's querying functionality is the ability to stream query results. When possible, Harper can return records from a query as they are found, rather than waiting for the entire query to complete. This can significantly improve performance for large queries, as it allows the application to start processing results or sending the initial data before the entire query is complete (improving time-to-first-byte speed, for example). However, using a sort order on a query with conditions that are not on an aligned index requires that the entire query result be loaded in order to perform the sorting, which defeats the streaming benefits.
diff --git a/site/versioned_docs/version-4.6/developers/miscellaneous/sdks.md b/site/versioned_docs/version-4.6/developers/miscellaneous/sdks.md
new file mode 100644
index 00000000..d64e19ce
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/miscellaneous/sdks.md
@@ -0,0 +1,22 @@
+---
+title: SDKs
+description: >-
+  Software Development Kits available for connecting to Harper from different
+  languages.
+---
+
+# SDKs
+
+| SDK/Tool | Description | Installation |
+| -------- | ----------- | ------------ |
+| [HarperDB.NET.Client](https://www.nuget.org/packages/HarperDB.NET.Client) | A Dot Net Core client to execute operations against HarperDB | `dotnet add package HarperDB.NET.Client --version 1.1.0` |
+| [Websocket Client](https://www.npmjs.com/package/harperdb-websocket-client) | A Javascript client for real-time access to HarperDB transactions | `npm i -s harperdb-websocket-client` |
+| [Gatsby HarperDB Source](https://www.npmjs.com/package/gatsby-source-harperdb) | Use Harper as the data source for a Gatsby project at the build time | `npm i -s gatsby-source-harperdb` |
+| [HarperDB.EntityFrameworkCore](https://www.nuget.org/packages/HarperDB.EntityFrameworkCore) | The Harper EntityFrameworkCore Provider Package for .NET 6.0 | `dotnet add package HarperDB.EntityFrameworkCore --version 1.0.0` |
+| [Python SDK](https://pypi.org/project/harperdb/) | Python3 implementations of Harper API functions with wrappers for an object-oriented interface | `pip3 install harperdb` |
+| [HarperDB Flutter SDK](https://github.com/HarperDB/harperdb-sdk-flutter) | A Harper SDK for Flutter | `flutter pub add harperdb` |
+| [React Hook](https://www.npmjs.com/package/use-harperdb) | A ReactJS Hook for HarperDB | `npm i -s use-harperdb` |
+| [Node Red Node](https://flows.nodered.org/node/node-red-contrib-harperdb) | Easy drag and drop connections to Harper using the Node-Red platform | `npm i -s node-red-contrib-harperdb` |
+| [NodeJS SDK](https://www.npmjs.com/package/harperive) | A Harper SDK for NodeJS | `npm i -s harperive` |
+| [HarperDB Cargo Crate](https://crates.io/crates/harperdb) | A Harper SDK for Rust | `Cargo.toml > harperdb = '1.0.0'` |
+| [HarperDB Go SDK](https://github.com/HarperDB/sdk-go) | A Harper SDK for Go | `go get github.com/HarperDB/sdk-go` |
diff --git a/site/versioned_docs/version-4.6/developers/operations-api/advanced-json-sql-examples.md b/site/versioned_docs/version-4.6/developers/operations-api/advanced-json-sql-examples.md
new file mode 100644
index 00000000..58116884
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/operations-api/advanced-json-sql-examples.md
@@ -0,0 +1,1775 @@
+---
+title: Advanced JSON SQL Examples
+---
+
+# Advanced JSON SQL Examples
+
+## Create movies database
+
+Create a new database called "movies" using the 'create_database' operation.
+
+_Note: Creating a database is optional; if one is not created, Harper will default to using a database named `data`._
+
+### Body
+
+```json
+{
+  "operation": "create_database",
+  "database": "movies"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "database 'movies' successfully created"
+}
+```
+
+---
+
+## Create movie Table
+
+Creates a new table called "movie" inside the database "movies" using the ‘create_table’ operation.
+
+### Body
+
+```json
+{
+  "operation": "create_table",
+  "database": "movies",
+  "table": "movie",
+  "primary_key": "id"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "table 'movies.movie' successfully created."
+}
+```
+
+---
+
+## Create credits Table
+
+Creates a new table called "credits" inside the database "movies" using the ‘create_table’ operation.
+
+### Body
+
+```json
+{
+  "operation": "create_table",
+  "database": "movies",
+  "table": "credits",
+  "primary_key": "movie_id"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "table 'movies.credits' successfully created."
+}
+```
+
+---
+
+## Bulk Insert movie Via CSV
+
+Inserts data from a hosted CSV file into the "movie" table using the 'csv_url_load' operation.
+
+### Body
+
+```json
+{
+  "operation": "csv_url_load",
+  "database": "movies",
+  "table": "movie",
+  "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/movie.csv"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Starting job with id 1889eee4-23c1-4945-9bb7-c805fc20726c"
+}
+```
+
+---
+
+## Bulk Insert credits Via CSV
+
+Inserts data from a hosted CSV file into the "credits" table using the 'csv_url_load' operation.
+
+### Body
+
+```json
+{
+  "operation": "csv_url_load",
+  "database": "movies",
+  "table": "credits",
+  "csv_url": "https://search-json-sample-data.s3.us-east-2.amazonaws.com/credits.csv"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Starting job with id 3a14cd74-67f3-41e9-8ccd-45ffd0addc2c",
+  "job_id": "3a14cd74-67f3-41e9-8ccd-45ffd0addc2c"
+}
+```
+
+---
+
+## View raw data
+
+In the following example we will be running expressions on the keywords & production_companies attributes, so for context we are displaying what the raw data looks like.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT title, rank, keywords, production_companies FROM movies.movie ORDER BY rank LIMIT 10"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "title": "Ad Astra",
+    "rank": 1,
+    "keywords": [
+      {
+        "id": 305,
+        "name": "moon"
+      },
+      {
+        "id": 697,
+        "name": "loss of loved one"
+      },
+      {
+        "id": 839,
+        "name": "planet mars"
+      },
+      {
+        "id": 14626,
+        "name": "astronaut"
+      },
+      {
+        "id": 157265,
+        "name": "moon colony"
+      },
+      {
+        "id": 162429,
+        "name": "solar system"
+      },
+      {
+        "id": 240119,
+        "name": "father son relationship"
+      },
+      {
+        "id": 244256,
+        "name": "near future"
+      },
+      {
+        "id": 257878,
+        "name": "planet neptune"
+      },
+      {
+        "id": 260089,
+        "name": "space walk"
+      }
+    ],
+    "production_companies": [
+      {
+        "id": 490,
+        "name": "New Regency Productions",
+        "origin_country": ""
+      },
+      {
+        "id": 79963,
+        "name": "Keep Your Head",
+        "origin_country": ""
+      },
+      {
+        "id": 73492,
+        "name": "MadRiver Pictures",
+        "origin_country": ""
+      },
+      {
+        "id": 81,
+        "name": "Plan B Entertainment",
+        "origin_country": "US"
+      },
+      {
+        "id": 30666,
+        "name": "RT Features",
+        "origin_country": "BR"
+      },
+      {
+        "id": 30148,
+        "name": "Bona Film Group",
+        "origin_country": "CN"
+      },
+      {
+        "id": 22213,
+        "name": "TSG Entertainment",
+        "origin_country": "US"
+      }
+    ]
+  },
+  {
+    "title": "Extraction",
+    "rank": 2,
+    "keywords": [
+      {
+        "id": 3070,
+        "name": "mercenary"
+      },
+      {
+        "id": 4110,
+        "name": "mumbai (bombay), india"
+      },
+      {
+        "id": 9717,
+        "name": "based on comic"
+      },
+      {
+        "id": 9730,
+        "name": "crime boss"
+      },
+      {
+        "id": 11107,
+        "name": "rescue mission"
+      },
+      {
+        "id": 18712,
+        "name": "based on graphic novel"
+      },
+      {
+        "id": 265216,
+        "name": "dhaka (dacca), bangladesh"
+      }
+    ],
+    "production_companies": [
+      {
+        "id": 106544,
+        "name": "AGBO",
+        "origin_country": "US"
+      },
+      {
+        "id": 109172,
+        "name": "Thematic Entertainment",
+        "origin_country": "US"
+      },
+      {
+        "id": 92029,
+        "name": "TGIM Films",
+        "origin_country": "US"
+      }
+    ]
+  },
+  {
+    "title": "To the Beat!
Back 2 School", + "rank": 3, + "keywords": [ + { + "id": 10873, + "name": "school" + } + ], + "production_companies": [] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": [ + { + "id": 2651, + "name": "nanotechnology" + }, + { + "id": 9715, + "name": "superhero" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 164218, + "name": "psychotronic" + }, + { + "id": 255024, + "name": "shared universe" + }, + { + "id": 258575, + "name": "valiant comics" + } + ], + "production_companies": [ + { + "id": 34, + "name": "Sony Pictures", + "origin_country": "US" + }, + { + "id": 10246, + "name": "Cross Creek Pictures", + "origin_country": "US" + }, + { + "id": 6573, + "name": "Mimran Schur Pictures", + "origin_country": "US" + }, + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 103673, + "name": "The Hideaway Entertainment", + "origin_country": "US" + }, + { + "id": 124335, + "name": "Valiant Entertainment", + "origin_country": "US" + }, + { + "id": 5, + "name": "Columbia Pictures", + "origin_country": "US" + }, + { + "id": 1225, + "name": "One Race", + "origin_country": "US" + }, + { + "id": 30148, + "name": "Bona Film Group", + "origin_country": "CN" + } + ] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + { + "id": 818, + "name": "based on novel or book" + }, + { + "id": 4542, + "name": "gold rush" + }, + { + "id": 15162, + "name": "dog" + }, + { + "id": 155821, + "name": "sled dogs" + }, + { + "id": 189390, + "name": "yukon" + }, + { + "id": 207928, + "name": "19th century" + }, + { + "id": 259987, + "name": "cgi animation" + }, + { + "id": 263806, + "name": "1890s" + } + ], + "production_companies": [ + { + "id": 787, + "name": "3 Arts Entertainment", + "origin_country": "US" + }, + { + "id": 127928, + "name": "20th Century Studios", + "origin_country": "US" + }, + { + "id": 22213, + "name": "TSG Entertainment", + "origin_country": "US" + } + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + { + "id": 282, + "name": "video game" + }, + { + "id": 6054, + "name": "friendship" + }, + { + "id": 10842, + "name": "good vs evil" + }, + { + "id": 41645, + "name": "based on video game" + }, + { + "id": 167043, + "name": "road movie" + }, + { + "id": 172142, + "name": "farting" + }, + { + "id": 188933, + "name": "bar fight" + }, + { + "id": 226967, + "name": "amistad" + }, + { + "id": 245230, + "name": "live action remake" + }, + { + "id": 258111, + "name": "fantasy" + }, + { + "id": 260223, + "name": "videojuego" + } + ], + "production_companies": [ + { + "id": 333, + "name": "Original Film", + "origin_country": "US" + }, + { + "id": 10644, + "name": "Blur Studios", + "origin_country": "US" + }, + { + "id": 77884, + "name": "Marza Animation Planet", + "origin_country": "JP" + }, + { + "id": 4, + "name": "Paramount", + "origin_country": "US" + }, + { + "id": 113750, + "name": "SEGA", + "origin_country": "JP" + }, + { + "id": 100711, + "name": "DJ2 Entertainment", + "origin_country": "" + }, + { + "id": 24955, + "name": "Paramount Animation", + "origin_country": "US" + } + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": [ + { + "id": 849, + "name": "dc comics" + }, + { + "id": 9717, + "name": "based on comic" + }, + { + "id": 187056, + "name": "woman director" + }, + { + "id": 229266, + "name": "dc extended universe" + } + ], + "production_companies": [ + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": 
"US" + }, + { + "id": 82968, + "name": "LuckyChap Entertainment", + "origin_country": "GB" + }, + { + "id": 103462, + "name": "Kroll & Co Entertainment", + "origin_country": "US" + }, + { + "id": 174, + "name": "Warner Bros. Pictures", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + }, + { + "id": 128064, + "name": "DC Films", + "origin_country": "US" + }, + { + "id": 101831, + "name": "Clubhouse Pictures", + "origin_country": "US" + } + ] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": [ + { + "id": 849, + "name": "dc comics" + } + ], + "production_companies": [ + { + "id": 2785, + "name": "Warner Bros. Animation", + "origin_country": "US" + }, + { + "id": 9993, + "name": "DC Entertainment", + "origin_country": "US" + }, + { + "id": 429, + "name": "DC Comics", + "origin_country": "US" + } + ] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + { + "id": 1353, + "name": "underground" + }, + { + "id": 5318, + "name": "seoul" + }, + { + "id": 5732, + "name": "birthday party" + }, + { + "id": 5752, + "name": "private lessons" + }, + { + "id": 9866, + "name": "basement" + }, + { + "id": 10453, + "name": "con artist" + }, + { + "id": 11935, + "name": "working class" + }, + { + "id": 12565, + "name": "psychological thriller" + }, + { + "id": 13126, + "name": "limousine driver" + }, + { + "id": 14514, + "name": "class differences" + }, + { + "id": 14864, + "name": "rich poor" + }, + { + "id": 17997, + "name": "housekeeper" + }, + { + "id": 18015, + "name": "tutor" + }, + { + "id": 18035, + "name": "family" + }, + { + "id": 33421, + "name": "crime family" + }, + { + "id": 173272, + "name": "flood" + }, + { + "id": 188861, + "name": "smell" + }, + { + "id": 198673, + "name": "unemployed" + }, + { + "id": 237462, + "name": "wealthy family" + } + ], + "production_companies": [ + { + "id": 7036, + "name": "CJ Entertainment", + "origin_country": "KR" + }, + { + "id": 4399, + "name": "Barunson E&A", + "origin_country": "KR" + } + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": [ + { + "id": 161176, + "name": "space opera" + } + ], + "production_companies": [ + { + "id": 1, + "name": "Lucasfilm", + "origin_country": "US" + }, + { + "id": 11461, + "name": "Bad Robot", + "origin_country": "US" + }, + { + "id": 2, + "name": "Walt Disney Pictures", + "origin_country": "US" + }, + { + "id": 120404, + "name": "British Film Commission", + "origin_country": "" + } + ] + } +] +``` + +--- + +## Simple search_json call + +This query uses search_json to convert the keywords object array to a simple string array. The expression '[name]' tells the function to extract all values for the name attribute and wrap them in an array. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, rank, search_json('[name]', keywords) as keywords FROM movies.movie ORDER BY rank LIMIT 10" +} +``` + +### Response: 200 + +```json +[ + { + "title": "Ad Astra", + "rank": 1, + "keywords": [ + "moon", + "loss of loved one", + "planet mars", + "astronaut", + "moon colony", + "solar system", + "father son relationship", + "near future", + "planet neptune", + "space walk" + ] + }, + { + "title": "Extraction", + "rank": 2, + "keywords": [ + "mercenary", + "mumbai (bombay), india", + "based on comic", + "crime boss", + "rescue mission", + "based on graphic novel", + "dhaka (dacca), bangladesh" + ] + }, + { + "title": "To the Beat! 
Back 2 School", + "rank": 3, + "keywords": ["school"] + }, + { + "title": "Bloodshot", + "rank": 4, + "keywords": ["nanotechnology", "superhero", "based on comic", "psychotronic", "shared universe", "valiant comics"] + }, + { + "title": "The Call of the Wild", + "rank": 5, + "keywords": [ + "based on novel or book", + "gold rush", + "dog", + "sled dogs", + "yukon", + "19th century", + "cgi animation", + "1890s" + ] + }, + { + "title": "Sonic the Hedgehog", + "rank": 6, + "keywords": [ + "video game", + "friendship", + "good vs evil", + "based on video game", + "road movie", + "farting", + "bar fight", + "amistad", + "live action remake", + "fantasy", + "videojuego" + ] + }, + { + "title": "Birds of Prey (and the Fantabulous Emancipation of One Harley Quinn)", + "rank": 7, + "keywords": ["dc comics", "based on comic", "woman director", "dc extended universe"] + }, + { + "title": "Justice League Dark: Apokolips War", + "rank": 8, + "keywords": ["dc comics"] + }, + { + "title": "Parasite", + "rank": 9, + "keywords": [ + "underground", + "seoul", + "birthday party", + "private lessons", + "basement", + "con artist", + "working class", + "psychological thriller", + "limousine driver", + "class differences", + "rich poor", + "housekeeper", + "tutor", + "family", + "crime family", + "flood", + "smell", + "unemployed", + "wealthy family" + ] + }, + { + "title": "Star Wars: The Rise of Skywalker", + "rank": 10, + "keywords": ["space opera"] + } +] +``` + +--- + +## Use search_json in a where clause + +This example shows how we can use SEARCH_JSON to filter out records in a WHERE clause. The production_companies attribute holds an object array of companies that produced each movie, we want to only see movies which were produced by Marvel Studios. Our expression is a filter '$[name="Marvel Studios"]' this tells the function to iterate the production_companies array and only return entries where the name is "Marvel Studios". 
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT title, release_date FROM movies.movie where search_json('$[name=\"Marvel Studios\"]', production_companies) IS NOT NULL ORDER BY release_date" +} +``` + +### Response: 200 + +```json +[ + { + "title": "Iron Man", + "release_date": "2008-04-30" + }, + { + "title": "The Incredible Hulk", + "release_date": "2008-06-12" + }, + { + "title": "Iron Man 2", + "release_date": "2010-04-28" + }, + { + "title": "Thor", + "release_date": "2011-04-21" + }, + { + "title": "Captain America: The First Avenger", + "release_date": "2011-07-22" + }, + { + "title": "Marvel One-Shot: The Consultant", + "release_date": "2011-09-12" + }, + { + "title": "Marvel One-Shot: A Funny Thing Happened on the Way to Thor's Hammer", + "release_date": "2011-10-25" + }, + { + "title": "The Avengers", + "release_date": "2012-04-25" + }, + { + "title": "Marvel One-Shot: Item 47", + "release_date": "2012-09-13" + }, + { + "title": "Iron Man 3", + "release_date": "2013-04-18" + }, + { + "title": "Marvel One-Shot: Agent Carter", + "release_date": "2013-09-08" + }, + { + "title": "Thor: The Dark World", + "release_date": "2013-10-29" + }, + { + "title": "Marvel One-Shot: All Hail the King", + "release_date": "2014-02-04" + }, + { + "title": "Marvel Studios: Assembling a Universe", + "release_date": "2014-03-18" + }, + { + "title": "Captain America: The Winter Soldier", + "release_date": "2014-03-20" + }, + { + "title": "Guardians of the Galaxy", + "release_date": "2014-07-30" + }, + { + "title": "Avengers: Age of Ultron", + "release_date": "2015-04-22" + }, + { + "title": "Ant-Man", + "release_date": "2015-07-14" + }, + { + "title": "Captain America: Civil War", + "release_date": "2016-04-27" + }, + { + "title": "Team Thor", + "release_date": "2016-08-28" + }, + { + "title": "Doctor Strange", + "release_date": "2016-10-25" + }, + { + "title": "Guardians of the Galaxy Vol. 2", + "release_date": "2017-04-19" + }, + { + "title": "Spider-Man: Homecoming", + "release_date": "2017-07-05" + }, + { + "title": "Thor: Ragnarok", + "release_date": "2017-10-25" + }, + { + "title": "Black Panther", + "release_date": "2018-02-13" + }, + { + "title": "Avengers: Infinity War", + "release_date": "2018-04-25" + }, + { + "title": "Ant-Man and the Wasp", + "release_date": "2018-07-04" + }, + { + "title": "Captain Marvel", + "release_date": "2019-03-06" + }, + { + "title": "Avengers: Endgame", + "release_date": "2019-04-24" + }, + { + "title": "Spider-Man: Far from Home", + "release_date": "2019-06-28" + }, + { + "title": "Black Widow", + "release_date": "2020-10-28" + }, + { + "title": "Untitled Spider-Man 3", + "release_date": "2021-11-04" + }, + { + "title": "Thor: Love and Thunder", + "release_date": "2022-02-10" + }, + { + "title": "Doctor Strange in the Multiverse of Madness", + "release_date": "2022-03-23" + }, + { + "title": "Untitled Marvel Project (3)", + "release_date": "2022-07-29" + }, + { + "title": "Guardians of the Galaxy Vol. 3", + "release_date": "2023-02-16" + } +] +``` + +--- + +## Use search_json to show the movies with the largest casts + +This example shows how we can use SEARCH_JSON to perform a simple calculation on JSON and order by the result. The cast attribute holds an object array of details about the cast of a movie. We use the expression '$count(id)', which counts each id in the array and returns the total; we alias it in SQL as cast_size, which in turn is used to sort the rows.
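The canonical query appears in the body below. As a hypothetical variation on the same pattern (assuming the TMDB-style credits data also carries a crew object array, an attribute not shown in these examples), movies could be ranked by crew size instead:

```json
{
	"operation": "sql",
	"sql": "SELECT movie_title, search_json('$count(id)', `crew`) as crew_size FROM movies.credits ORDER BY crew_size DESC LIMIT 10"
}
```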
+ +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT movie_title, search_json('$count(id)', `cast`) as cast_size FROM movies.credits ORDER BY cast_size DESC LIMIT 10" +} +``` + +### Response: 200 + +```json +[ + { + "movie_title": "Around the World in Eighty Days", + "cast_size": 312 + }, + { + "movie_title": "And the Oscar Goes To...", + "cast_size": 259 + }, + { + "movie_title": "Rock of Ages", + "cast_size": 223 + }, + { + "movie_title": "Mr. Smith Goes to Washington", + "cast_size": 213 + }, + { + "movie_title": "Les Misérables", + "cast_size": 208 + }, + { + "movie_title": "Jason Bourne", + "cast_size": 201 + }, + { + "movie_title": "The Muppets", + "cast_size": 191 + }, + { + "movie_title": "You Don't Mess with the Zohan", + "cast_size": 183 + }, + { + "movie_title": "The Irishman", + "cast_size": 173 + }, + { + "movie_title": "Spider-Man: Far from Home", + "cast_size": 173 + } +] +``` + +--- + +## search_json as a condition in a select with a table join + +This example shows how we can use SEARCH_JSON to find movies where at least 2 of our favorite actors from Marvel films have acted together, then list the movie, its overview, its release date, and the actors' names and characters. The WHERE clause counts the entries in the credits.cast attribute that match those actors. The SELECT performs the same filter on the cast attribute and transforms each matching object to return just the actor's name and their character. + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT m.title, m.overview, m.release_date, search_json('$[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]].{\"actor\": name, \"character\": character}', c.`cast`) as characters FROM movies.credits c INNER JOIN movies.movie m ON c.movie_id = m.id WHERE search_json('$count($[name in [\"Robert Downey Jr.\", \"Chris Evans\", \"Scarlett Johansson\", \"Mark Ruffalo\", \"Chris Hemsworth\", \"Jeremy Renner\", \"Clark Gregg\", \"Samuel L. Jackson\", \"Gwyneth Paltrow\", \"Don Cheadle\"]])', c.`cast`) >= 2" +} +``` + +### Response: 200 + +```json +[ + { + "title": "Out of Sight", + "overview": "Meet Jack Foley, a smooth criminal who bends the law and is determined to make one last heist. Karen Sisco is a federal marshal who chooses all the right moves … and all the wrong guys. Now they're willing to risk it all to find out if there's more between them than just the law.", + "release_date": "1998-06-26", + "characters": [ + { + "actor": "Don Cheadle", + "character": "Maurice Miller" + }, + { + "actor": "Samuel L. Jackson", + "character": "Hejira Henry (uncredited)" + } + ] + }, + { + "title": "Iron Man", + "overview": "After being held captive in an Afghan cave, billionaire engineer Tony Stark creates a unique weaponized suit of armor to fight evil.", + "release_date": "2008-04-30", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Samuel L.
Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Captain America: The First Avenger", + "overview": "During World War II, Steve Rogers is a sickly man from Brooklyn who's transformed into super-soldier Captain America to aid in the war effort. Rogers must stop the Red Skull – Adolf Hitler's ruthless head of weaponry, and the leader of an organization that intends to use a mysterious device of untold powers for world domination.", + "release_date": "2011-07-22", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "In Good Company", + "overview": "Dan Foreman is a seasoned advertisement sales executive at a high-ranking publication when a corporate takeover results in him being placed under naive supervisor Carter Duryea, who is half his age. Matters are made worse when Dan's new supervisor becomes romantically involved with his daughter an 18 year-old college student Alex.", + "release_date": "2004-12-29", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Alex Foreman" + }, + { + "actor": "Clark Gregg", + "character": "Mark Steckle" + } + ] + }, + { + "title": "Zodiac", + "overview": "The true story of the investigation of the \"Zodiac Killer\", a serial killer who terrified the San Francisco Bay Area, taunting police with his ciphers and letters. The case becomes an obsession for three men as their lives and careers are built and destroyed by the endless trail of clues.", + "release_date": "2007-03-02", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Dave Toschi" + }, + { + "actor": "Robert Downey Jr.", + "character": "Paul Avery" + } + ] + }, + { + "title": "Hard Eight", + "overview": "A stranger mentors a young Reno gambler who weds a hooker and befriends a vulgar casino regular.", + "release_date": "1996-02-28", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Clementine" + }, + { + "actor": "Samuel L. Jackson", + "character": "Jimmy" + } + ] + }, + { + "title": "The Spirit", + "overview": "Down these mean streets a man must come. A hero born, murdered, and born again. A Rookie cop named Denny Colt returns from the beyond as The Spirit, a hero whose mission is to fight against the bad forces from the shadows of Central City. The Octopus, who kills anyone unfortunate enough to see his face, has other plans; he is going to wipe out the entire city.", + "release_date": "2008-12-25", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Silken Floss" + }, + { + "actor": "Samuel L. Jackson", + "character": "Octopuss" + } + ] + }, + { + "title": "S.W.A.T.", + "overview": "Hondo Harrelson recruits Jim Street to join an elite unit of the Los Angeles Police Department. Together they seek out more members, including tough Deke Kay and single mom Chris Sanchez. The team's first big assignment is to escort crime boss Alex Montel to prison. It seems routine, but when Montel offers a huge reward to anyone who can break him free, criminals of various stripes step up for the prize.", + "release_date": "2003-08-08", + "characters": [ + { + "actor": "Samuel L. Jackson", + "character": "Sgt. 
Dan 'Hondo' Harrelson" + }, + { + "actor": "Jeremy Renner", + "character": "Brian Gamble" + } + ] + }, + { + "title": "Iron Man 2", + "overview": "With the world now aware of his dual life as the armored superhero Iron Man, billionaire inventor Tony Stark faces pressure from the government, the press and the public to share his technology with the military. Unwilling to let go of his invention, Stark, with Pepper Potts and James 'Rhodey' Rhodes at his side, must forge new alliances – and confront powerful enemies.", + "release_date": "2010-04-28", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Scarlett Johansson", + "character": "Natalie Rushman / Natasha Romanoff / Black Widow" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + } + ] + }, + { + "title": "Thor", + "overview": "Against his father Odin's will, The Mighty Thor - a powerful but arrogant warrior god - recklessly reignites an ancient war. Thor is cast down to Earth and forced to live among humans as punishment. Once here, Thor learns what it takes to be a true hero when the most dangerous villain of his world sends the darkest forces of Asgard to invade Earth.", + "release_date": "2011-04-21", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye (uncredited)" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + } + ] + }, + { + "title": "View from the Top", + "overview": "A small-town woman tries to achieve her goal of becoming a flight attendant.", + "release_date": "2003-03-21", + "characters": [ + { + "actor": "Gwyneth Paltrow", + "character": "Donna" + }, + { + "actor": "Mark Ruffalo", + "character": "Ted Stewart" + } + ] + }, + { + "title": "The Nanny Diaries", + "overview": "A college graduate goes to work as a nanny for a rich New York family. Ensconced in their home, she has to juggle their dysfunction, a new romance, and the spoiled brat in her charge.", + "release_date": "2007-08-24", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Annie Braddock" + }, + { + "actor": "Chris Evans", + "character": "Hayden \"Harvard Hottie\"" + } + ] + }, + { + "title": "The Perfect Score", + "overview": "Six high school seniors decide to break into the Princeton Testing Center so they can steal the answers to their upcoming SAT tests and all get perfect scores.", + "release_date": "2004-01-30", + "characters": [ + { + "actor": "Chris Evans", + "character": "Kyle" + }, + { + "actor": "Scarlett Johansson", + "character": "Francesca Curtis" + } + ] + }, + { + "title": "The Avengers", + "overview": "When an unexpected enemy emerges and threatens global safety and security, Nick Fury, director of the international peacekeeping agency known as S.H.I.E.L.D., finds himself in need of a team to pull the world back from the brink of disaster. 
Spanning the globe, a daring recruitment effort begins!", + "release_date": "2012-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + } + ] + }, + { + "title": "Iron Man 3", + "overview": "When Tony Stark's world is torn apart by a formidable terrorist called the Mandarin, he starts an odyssey of rebuilding and retribution.", + "release_date": "2013-04-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / Iron Patriot" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner (uncredited)" + } + ] + }, + { + "title": "Marvel One-Shot: The Consultant", + "overview": "Agent Coulson informs Agent Sitwell that the World Security Council wishes Emil Blonsky to be released from prison to join the Avengers Initiative. As Nick Fury doesn't want to release Blonsky, the two agents decide to send a patsy to sabotage the meeting...", + "release_date": "2011-09-12", + "characters": [ + { + "actor": "Clark Gregg", + "character": "Phil Coulson" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark (archive footage)" + } + ] + }, + { + "title": "Thor: The Dark World", + "overview": "Thor fights to restore order across the cosmos… but an ancient race led by the vengeful Malekith returns to plunge the universe back into darkness. Faced with an enemy that even Odin and Asgard cannot withstand, Thor must embark on his most perilous and personal journey yet, one that will reunite him with Jane Foster and force him to sacrifice everything to save us all.", + "release_date": "2013-10-29", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Loki as Captain America (uncredited)" + } + ] + }, + { + "title": "Avengers: Age of Ultron", + "overview": "When Tony Stark tries to jumpstart a dormant peacekeeping program, things go awry and Earth’s Mightiest Heroes are put to the ultimate test as the fate of the planet hangs in the balance. 
As the villainous Ultron emerges, it is up to The Avengers to stop him from enacting his terrible plans, and soon uneasy alliances and unexpected action pave the way for an epic and unique global adventure.", + "release_date": "2015-04-22", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + } + ] + }, + { + "title": "Captain America: The Winter Soldier", + "overview": "After the cataclysmic events in New York with The Avengers, Steve Rogers, aka Captain America is living quietly in Washington, D.C. and trying to adjust to the modern world. But when a S.H.I.E.L.D. colleague comes under attack, Steve becomes embroiled in a web of intrigue that threatens to put the world at risk. Joining forces with the Black Widow, Captain America struggles to expose the ever-widening conspiracy while fighting off professional assassins sent to silence him at every turn. When the full scope of the villainous plot is revealed, Captain America and the Black Widow enlist the help of a new ally, the Falcon. However, they soon find themselves up against an unexpected and formidable enemy—the Winter Soldier.", + "release_date": "2014-03-20", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + } + ] + }, + { + "title": "Thanks for Sharing", + "overview": "A romantic comedy that brings together three disparate characters who are learning to face a challenging and often confusing world as they struggle together against a common demon—sex addiction.", + "release_date": "2013-09-19", + "characters": [ + { + "actor": "Mark Ruffalo", + "character": "Adam" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Phoebe" + } + ] + }, + { + "title": "Chef", + "overview": "When Chef Carl Casper suddenly quits his job at a prominent Los Angeles restaurant after refusing to compromise his creative integrity for its controlling owner, he is left to figure out what's next. Finding himself in Miami, he teams up with his ex-wife, his friend and his son to launch a food truck. 
Taking to the road, Chef Carl goes back to his roots to reignite his passion for the kitchen -- and zest for life and love.", + "release_date": "2014-05-08", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Molly" + }, + { + "actor": "Robert Downey Jr.", + "character": "Marvin" + } + ] + }, + { + "title": "Marvel Studios: Assembling a Universe", + "overview": "A look at the story behind Marvel Studios and the Marvel Cinematic Universe, featuring interviews and behind-the-scenes footage from all of the Marvel films, the Marvel One-Shots and \"Marvel's Agents of S.H.I.E.L.D.\"", + "release_date": "2014-03-18", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Himself / Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Himself / Thor" + }, + { + "actor": "Chris Evans", + "character": "Himself / Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Himself / Bruce Banner / Hulk" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Herself" + }, + { + "actor": "Clark Gregg", + "character": "Himself" + }, + { + "actor": "Samuel L. Jackson", + "character": "Himself" + }, + { + "actor": "Scarlett Johansson", + "character": "Herself" + }, + { + "actor": "Jeremy Renner", + "character": "Himself" + } + ] + }, + { + "title": "Captain America: Civil War", + "overview": "Following the events of Age of Ultron, the collective governments of the world pass an act designed to regulate all superhuman activity. This polarizes opinion amongst the Avengers, causing two factions to side with Iron Man or Captain America, which causes an epic battle between former allies.", + "release_date": "2016-04-27", + "characters": [ + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + } + ] + }, + { + "title": "Thor: Ragnarok", + "overview": "Thor is imprisoned on the other side of the universe and finds himself in a race against time to get back to Asgard to stop Ragnarok, the destruction of his home-world and the end of Asgardian civilization, at the hands of an all-powerful new threat, the ruthless Hela.", + "release_date": "2017-10-25", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (archive footage / uncredited)" + } + ] + }, + { + "title": "Avengers: Endgame", + "overview": "After the devastating events of Avengers: Infinity War, the universe is in ruins due to the efforts of the Mad Titan, Thanos. 
With the help of remaining allies, the Avengers must assemble once more in order to undo Thanos' actions and restore order to the universe once and for all, no matter what consequences may be in store.", + "release_date": "2019-04-24", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / Hulk" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Jeremy Renner", + "character": "Clint Barton / Hawkeye" + }, + { + "actor": "Don Cheadle", + "character": "James Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Pepper Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury" + } + ] + }, + { + "title": "Avengers: Infinity War", + "overview": "As the Avengers and their allies have continued to protect the world from threats too large for any one hero to handle, a new danger has emerged from the cosmic shadows: Thanos. A despot of intergalactic infamy, his goal is to collect all six Infinity Stones, artifacts of unimaginable power, and use them to inflict his twisted will on all of reality. Everything the Avengers have fought for has led up to this moment - the fate of Earth and existence itself has never been more uncertain.", + "release_date": "2018-04-25", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Don Cheadle", + "character": "James \"Rhodey\" Rhodes / War Machine" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Samuel L. Jackson", + "character": "Nick Fury (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } + ] + }, + { + "title": "Captain Marvel", + "overview": "The story follows Carol Danvers as she becomes one of the universe’s most powerful heroes when Earth is caught in the middle of a galactic war between two alien races. Set in the 1990s, Captain Marvel is an all-new adventure from a previously unseen period in the history of the Marvel Cinematic Universe.", + "release_date": "2019-03-06", + "characters": [ + { + "actor": "Samuel L. 
Jackson", + "character": "Nick Fury" + }, + { + "actor": "Clark Gregg", + "character": "Agent Phil Coulson" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America (uncredited)" + }, + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow (uncredited)" + }, + { + "actor": "Don Cheadle", + "character": "James 'Rhodey' Rhodes / War Machine (uncredited)" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk (uncredited)" + } + ] + }, + { + "title": "Spider-Man: Homecoming", + "overview": "Following the events of Captain America: Civil War, Peter Parker, with the help of his mentor Tony Stark, tries to balance his life as an ordinary high school student in Queens, New York City, with fighting crime as his superhero alter ego Spider-Man as a new threat, the Vulture, emerges.", + "release_date": "2017-07-05", + "characters": [ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Gwyneth Paltrow", + "character": "Virginia \"Pepper\" Potts" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + } + ] + }, + { + "title": "Team Thor", + "overview": "Discover what Thor was up to during the events of Captain America: Civil War.", + "release_date": "2016-08-28", + "characters": [ + { + "actor": "Chris Hemsworth", + "character": "Thor Odinson" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner" + } + ] + }, + { + "title": "Black Widow", + "overview": "Natasha Romanoff, also known as Black Widow, confronts the darker parts of her ledger when a dangerous conspiracy with ties to her past arises. Pursued by a force that will stop at nothing to bring her down, Natasha must deal with her history as a spy and the broken relationships left in her wake long before she became an Avenger.", + "release_date": "2020-10-28", + "characters": [ + { + "actor": "Scarlett Johansson", + "character": "Natasha Romanoff / Black Widow" + }, + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + } + ] + } +] +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/analytics.md b/site/versioned_docs/version-4.6/developers/operations-api/analytics.md new file mode 100644 index 00000000..548a2ed5 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/analytics.md @@ -0,0 +1,125 @@ +--- +title: Analytics Operations +--- + +# Analytics Operations + +## get_analytics +Retrieves analytics data from the server. 
+ +* operation _(required)_ - must always be `get_analytics` +* metric _(required)_ - any value returned by `list_metrics` +* start_time _(optional)_ - Unix timestamp in seconds +* end_time _(optional)_ - Unix timestamp in seconds +* get_attributes _(optional)_ - array of attribute names to retrieve +* conditions _(optional)_ - array of conditions to filter results (see [search_by_conditions docs](./nosql-operations) for details) + +### Body + +```json +{ + "operation": "get_analytics", + "metric": "resource-usage", + "start_time": 1609459200, + "end_time": 1609545600, + "get_attributes": ["id", "metric", "userCPUTime", "systemCPUTime"], + "conditions": [ + { + "attribute": "node", + "operator": "equals", + "value": "node1.example.com" + } + ] +} +``` + +### Response 200 + +```json +[ + { + "id": "12345", + "metric": "resource-usage", + "userCPUTime": 100, + "systemCPUTime": 50 + }, + { + "id": "67890", + "metric": "resource-usage", + "userCPUTime": 150, + "systemCPUTime": 75 + } +] +``` + +## list_metrics +Returns a list of available metrics that can be queried. + +* operation _(required)_ - must always be `list_metrics` +* metric_types _(optional)_ - array of metric types to filter results; one or both of `custom` and `builtin`; default is `builtin` + +### Body + +```json +{ + "operation": "list_metrics", + "metric_types": ["custom", "builtin"] +} +``` + +### Response 200 + +```json +[ + "resource-usage", + "table-size", + "database-size", + "main-thread-utilization", + "utilization", + "storage-volume" +] +``` + +## describe_metric +Provides detailed information about a specific metric, including its structure and available parameters. + +* operation _(required)_ - must always be `describe_metric` +* metric _(required)_ - name of the metric to describe + +### Body + +```json +{ + "operation": "describe_metric", + "metric": "resource-usage" +} +``` + +### Response 200 + +```json +{ + "attributes": [ + { + "name": "id", + "type": "number" + }, + { + "name": "metric", + "type": "string" + }, + { + "name": "userCPUTime", + "type": "number" + }, + { + "name": "systemCPUTime", + "type": "number" + }, + { + "name": "node", + "type": "string" + } + ] +} +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/bulk-operations.md b/site/versioned_docs/version-4.6/developers/operations-api/bulk-operations.md new file mode 100644 index 00000000..51801438 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/bulk-operations.md @@ -0,0 +1,255 @@ +--- +title: Bulk Operations +--- + +# Bulk Operations + +## Export Local + +Exports data based on a given search operation to a local file in JSON or CSV format. + +- operation _(required)_ - must always be `export_local` +- format _(required)_ - the format you wish to export the data in; options are `json` and `csv` +- path _(required)_ - the path, local to the server, to export the data to +- search_operation _(required)_ - a search operation of type `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` +- filename _(optional)_ - the name of the file your export will be written to (do not include the extension). If one is not provided, it will be autogenerated based on the epoch.
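Note that these bulk operations run asynchronously: the response, as in the example below, returns a job id rather than the finished result. That id can be passed to the `get_job` operation to check progress, e.g. (using the job id from the example response that follows):

```json
{
	"operation": "get_job",
	"id": "6fc18eaa-3504-4374-815c-44840a12e7e5"
}
```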
+ +### Body + +```json +{ + "operation": "export_local", + "format": "json", + "path": "/data/", + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.breed" + } +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 6fc18eaa-3504-4374-815c-44840a12e7e5" +} +``` + +--- + +## CSV Data Load + +Ingests CSV data, provided directly in the operation, as an `insert`, `update` or `upsert` into the specified database table. + +- operation _(required)_ - must always be `csv_data_load` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data. The default is `data` +- table _(required)_ - name of the table where you are loading your data +- data _(required)_ - CSV data to import into Harper + +### Body + +```json +{ + "operation": "csv_data_load", + "database": "dev", + "action": "insert", + "table": "breed", + "data": "id,name,section,country,image\n1,ENGLISH POINTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/001g07.jpg\n2,ENGLISH SETTER,British and Irish Pointers and Setters,GREAT BRITAIN,http://www.fci.be/Nomenclature/Illustrations/002g07.jpg\n3,KERRY BLUE TERRIER,Large and medium sized Terriers,IRELAND,\n" +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 2fe25039-566e-4670-8bb3-2db3d4e07e69", + "job_id": "2fe25039-566e-4670-8bb3-2db3d4e07e69" +} +``` + +--- + +## CSV File Load + +Ingests CSV data, provided via a path on the local filesystem, as an `insert`, `update` or `upsert` into the specified database table. + +_Note: The CSV file must reside on the same machine on which Harper is running. For example, the path to a CSV on your computer will produce an error if your Harper instance is a cloud instance._ + +- operation _(required)_ - must always be `csv_file_load` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data. The default is `data` +- table _(required)_ - name of the table where you are loading your data +- file_path _(required)_ - path to the CSV file on the host running Harper + +### Body + +```json +{ + "operation": "csv_file_load", + "action": "insert", + "database": "dev", + "table": "breed", + "file_path": "/home/user/imports/breeds.csv" +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 3994d8e2-ec6a-43c4-8563-11c1df81870e", + "job_id": "3994d8e2-ec6a-43c4-8563-11c1df81870e" +} +``` + +--- + +## CSV URL Load + +Ingests CSV data, provided via URL, as an `insert`, `update` or `upsert` into the specified database table. + +- operation _(required)_ - must always be `csv_url_load` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data.
The default is `data` +- table _(required)_ - name of the table where you are loading your data +- csv_url _(required)_ - URL to the CSV + +### Body + +```json +{ + "operation": "csv_url_load", + "action": "insert", + "database": "dev", + "table": "breed", + "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv" +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 332aa0a2-6833-46cd-88a6-ae375920436a", + "job_id": "332aa0a2-6833-46cd-88a6-ae375920436a" +} +``` + +--- + +## Export To S3 + +Exports data based on a given search operation from a table to AWS S3 in JSON or CSV format. + +- operation _(required)_ - must always be `export_to_s3` +- format _(required)_ - the format you wish to export the data in; options are `json` and `csv` +- s3 _(required)_ - an object containing your access keys, bucket, bucket region and key for saving the data to S3 +- search_operation _(required)_ - a search operation of type `search_by_hash`, `search_by_value`, `search_by_conditions` or `sql` + +### Body + +```json +{ + "operation": "export_to_s3", + "format": "json", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + }, + "search_operation": { + "operation": "sql", + "sql": "SELECT * FROM dev.dog" + } +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 9fa85968-4cb1-4008-976e-506c4b13fc4a", + "job_id": "9fa85968-4cb1-4008-976e-506c4b13fc4a" +} +``` + +--- + +## Import from S3 + +This operation allows users to import CSV or JSON files from an AWS S3 bucket as an `insert`, `update` or `upsert`. + +- operation _(required)_ - must always be `import_from_s3` +- action _(optional)_ - type of action you want to perform - `insert`, `update` or `upsert`. The default is `insert` +- database _(optional)_ - name of the database where you are loading your data. The default is `data` +- table _(required)_ - name of the table where you are loading your data +- s3 _(required)_ - an object containing the required AWS S3 bucket info for the operation: + - aws_access_key_id - AWS access key for authenticating into your S3 bucket + - aws_secret_access_key - AWS secret for authenticating into your S3 bucket + - bucket - AWS S3 bucket to import from + - key - the name of the file to import - _the file must include a valid file extension ('.csv' or '.json')_ + - region - the region of the bucket + +### Body + +```json +{ + "operation": "import_from_s3", + "action": "insert", + "database": "dev", + "table": "dog", + "s3": { + "aws_access_key_id": "YOUR_KEY", + "aws_secret_access_key": "YOUR_SECRET_KEY", + "bucket": "BUCKET_NAME", + "key": "OBJECT_NAME", + "region": "BUCKET_REGION" + } +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 062a1892-6a0a-4282-9791-0f4c93b12e16", + "job_id": "062a1892-6a0a-4282-9791-0f4c93b12e16" +} +``` + +--- + +## Delete Records Before + +Deletes data older than the specified timestamp from the specified database table, exclusively on the node where the operation is executed. Any clustered nodes with replicated data will retain that data. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `delete_records_before` +- date _(required)_ - records older than this date will be deleted.
The supported format is `YYYY-MM-DDThh:mm:ss.sZ` +- schema _(required)_ - name of the schema where you are deleting your data +- table _(required)_ - name of the table where you are deleting your data + +### Body + +```json +{ + "operation": "delete_records_before", + "date": "2021-01-25T23:05:27.464", + "schema": "dev", + "table": "breed" +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id d3aed926-e9fe-4ec1-aea7-0fb4451bd373", + "job_id": "d3aed926-e9fe-4ec1-aea7-0fb4451bd373" +} +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/certificate-management.md b/site/versioned_docs/version-4.6/developers/operations-api/certificate-management.md new file mode 100644 index 00000000..b569dffc --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/certificate-management.md @@ -0,0 +1,124 @@ +--- +title: Certificate Management +--- + +# Certificate Management + +## Add Certificate + +Adds or updates a certificate in the `hdb_certificate` system table. +If a `private_key` is provided, it will **not** be stored in `hdb_certificate`; it will be written to a file in `/keys/`. +If a `private_key` is not passed, the operation will search for one that matches the certificate. If one is not found, an error will be returned. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `add_certificate` +- name _(required)_ - a unique name for the certificate +- certificate _(required)_ - a PEM formatted certificate string +- is_authority _(required)_ - a boolean indicating if the certificate is a certificate authority +- hosts _(optional)_ - an array of hostnames that the certificate is valid for +- private_key _(optional)_ - a PEM formatted private key string + +### Body + +```json +{ + "operation": "add_certificate", + "name": "my-cert", + "certificate": "-----BEGIN CERTIFICATE-----ZDFAay... -----END CERTIFICATE-----", + "is_authority": false, + "private_key": "-----BEGIN RSA PRIVATE KEY-----Y4dMpw5f... -----END RSA PRIVATE KEY-----" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully added certificate: my-cert" +} +``` + +--- + +## Remove Certificate + +Removes a certificate from the `hdb_certificate` system table and deletes the corresponding private key file. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `remove_certificate` +- name _(required)_ - the name of the certificate + +### Body + +```json +{ + "operation": "remove_certificate", + "name": "my-cert" +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully removed my-cert" +} +``` + +--- + +## List Certificates + +Lists all certificates in the `hdb_certificate` system table. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `list_certificates` + +### Body + +```json +{ + "operation": "list_certificates" +} +``` + +### Response: 200 + +```json +[ + { + "name": "HarperDB-Certificate-Authority-node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\nTANBgkqhk...
S34==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": true, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "serial_number": "5235345", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": ["https", "wss"] + }, + { + "name": "node1", + "certificate": "-----BEGIN CERTIFICATE-----\r\ngIEcSR1M... 5bv==\r\n-----END CERTIFICATE-----\r\n", + "private_key_name": "privateKey.pem", + "is_authority": false, + "details": { + "issuer": "CN=HarperDB-Certificate-Authority-node1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject": "CN=node.1 C=USA ST=Colorado L=Denver O=HarperDB\\, Inc.", + "subject_alt_name": "IP Address:127.0.0.1, DNS:localhost, IP Address:0:0:0:0:0:0:0:1, DNS:node.1", + "serial_number": "5243646", + "valid_from": "Aug 27 15:00:00 2024 GMT", + "valid_to": "Aug 25 15:00:00 2034 GMT" + }, + "is_self_signed": true, + "uses": ["https", "wss"] + } +] +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/clustering-nats.md b/site/versioned_docs/version-4.6/developers/operations-api/clustering-nats.md new file mode 100644 index 00000000..a45c593e --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/clustering-nats.md @@ -0,0 +1,486 @@ +--- +title: Clustering using NATS +--- + +# Clustering using NATS + +## Cluster Set Routes + +Adds a route/routes to either the hub or leaf server cluster configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `cluster_set_routes` +- server _(required)_ - must always be `hub` or `leaf`; in most cases you should use `hub` here +- routes _(required)_ - must always be an array of objects, each with a host and port: + - host - the host of the remote instance you are clustering to + - port - the clustering port of the remote instance you are clustering to; in most cases this is the value of `clustering.hubServer.cluster.network.port` in the remote instance's `harperdb-config.yaml` + +### Body + +```json +{ + "operation": "cluster_set_routes", + "server": "hub", + "routes": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "cluster routes successfully set", + "set": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] +} +``` + +--- + +## Cluster Get Routes + +Gets all the hub and leaf server routes from the config file.
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `cluster_get_routes` + +### Body + +```json +{ + "operation": "cluster_get_routes" +} +``` + +### Response: 200 + +```json +{ + "hub": [ + { + "host": "3.22.181.22", + "port": 12345 + }, + { + "host": "3.137.184.8", + "port": 12345 + }, + { + "host": "18.223.239.195", + "port": 12345 + }, + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "leaf": [] +} +``` + +--- + +## Cluster Delete Routes + +Removes route(s) from the hub and/or leaf server routes array in the config file. Returns a deletion success message and arrays of deleted and skipped records. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `cluster_delete_routes` +- routes _(required)_ - must be an array of route objects + +### Body + +```json +{ + "operation": "cluster_delete_routes", + "routes": [ + { + "host": "18.116.24.71", + "port": 12345 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "cluster routes successfully deleted", + "deleted": [ + { + "host": "18.116.24.71", + "port": 12345 + } + ], + "skipped": [] +} +``` + +--- + +## Add Node + +Registers an additional Harper instance with associated subscriptions. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `add_node` +- node_name _(required)_ - the node name of the remote node +- subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `schema`, `table`, `subscribe` and `publish`: + - schema - the schema to replicate from + - table - the table to replicate from + - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + - publish - a boolean which determines if transactions on the local table should be replicated on the remote table + - start_time _(optional)_ - how far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format + +### Body + +```json +{ + "operation": "add_node", + "node_name": "ec2-3-22-181-22", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": false, + "publish": true, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully added 'ec2-3-22-181-22' to manifest" +} +``` + +--- + +## Update Node + +Modifies an existing Harper instance registration and associated subscriptions. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `update_node` +- node_name _(required)_ - the node name of the remote node you are updating +- subscriptions _(required)_ - The relationship created between nodes.
Must be an object array and include `schema`, `table`, `subscribe` and `publish`: + - schema - the schema to replicate from + - table - the table to replicate from + - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table + - publish - a boolean which determines if transactions on the local table should be replicated on the remote table + - start_time _(optional)_ - how far back to go to get transactions from the node being added. Must be in UTC YYYY-MM-DDTHH:mm:ss.sssZ format + +### Body + +```json +{ + "operation": "update_node", + "node_name": "ec2-18-223-239-195", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "subscribe": true, + "publish": false, + "start_time": "2022-09-02T20:06:35.993Z" + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully updated 'ec2-18-223-239-195'" +} +``` + +--- + +## Set Node Replication + +A more aptly named alias for add_node and update_node. This operation behaves as a PATCH/upsert, meaning it will insert or update the specified replication configurations while leaving other table replication configuration untouched. The `database` (aka `schema`) parameter is optional; it defaults to `data`. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `set_node_replication` +- node_name _(required)_ - the node name of the remote node you are updating +- subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `table`, `subscribe` and `publish`: + - database _(optional)_ - the database to replicate from + - table _(required)_ - the table to replicate from + - subscribe _(required)_ - a boolean which determines if transactions on the remote table should be replicated on the local table + - publish _(required)_ - a boolean which determines if transactions on the local table should be replicated on the remote table + +### Body + +```json +{ + "operation": "set_node_replication", + "node_name": "node1", + "subscriptions": [ + { + "table": "dog", + "subscribe": true, + "publish": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "Successfully updated 'node1'" +} +``` + +--- + +## Cluster Status + +Returns an array of status objects from a cluster. A status object will contain the clustering node name, whether or not clustering is enabled, and a list of possible connections. Learn more about [Harper clustering here](../clustering/). + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `cluster_status` + +### Body + +```json +{ + "operation": "cluster_status" +} +``` + +### Response: 200 + +```json +{ + "node_name": "ec2-18-221-143-69", + "is_enabled": true, + "connections": [ + { + "node_name": "ec2-3-22-181-22", + "status": "open", + "ports": { + "clustering": 12345, + "operations_api": 9925 + }, + "latency_ms": 13, + "uptime": "30d 1h 18m 8s", + "subscriptions": [ + { + "schema": "dev", + "table": "dog", + "publish": true, + "subscribe": true + } + ] + } + ] +} +``` + +--- + +## Cluster Network + +Returns an object array of enmeshed nodes. Each node object will contain the name of the node, the amount of time (in milliseconds) it took for it to respond, the names of the nodes it is enmeshed with, and the routes set in its config file. Learn more about [Harper clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `cluster_network`
+- timeout _(optional)_ - the amount of time in milliseconds to wait for a response from the network. Must be a number
+- connected_nodes _(optional)_ - if `true`, omits `connected_nodes` from the response. Must be a boolean. Defaults to `false`
+- routes _(optional)_ - if `true`, omits `routes` from the response. Must be a boolean. Defaults to `false`
+
+### Body
+
+```json
+{
+  "operation": "cluster_network"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "nodes": [
+    {
+      "name": "local_node",
+      "response_time": 4,
+      "connected_nodes": ["ec2-3-142-255-78"],
+      "routes": [
+        {
+          "host": "3.142.255.78",
+          "port": 9932
+        }
+      ]
+    },
+    {
+      "name": "ec2-3-142-255-78",
+      "response_time": 57,
+      "connected_nodes": ["ec2-3-12-153-124", "ec2-3-139-236-138", "local_node"],
+      "routes": []
+    }
+  ]
+}
+```
+
+---
+
+## Remove Node
+
+Removes a Harper instance and associated subscriptions from the cluster. Learn more about [Harper clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `remove_node`
+- node_name _(required)_ - the name of the node you are de-registering
+
+### Body
+
+```json
+{
+  "operation": "remove_node",
+  "node_name": "ec2-3-22-181-22"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully removed 'ec2-3-22-181-22' from manifest"
+}
+```
+
+---
+
+## Configure Cluster
+
+Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup.
+Learn more about [Harper clustering here](../clustering/).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `configure_cluster`
+- connections _(required)_ - must be an object array with each object containing `node_name` and `subscriptions` for that node
+
+### Body
+
+```json
+{
+  "operation": "configure_cluster",
+  "connections": [
+    {
+      "node_name": "ec2-3-137-184-8",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "subscribe": true,
+          "publish": false
+        }
+      ]
+    },
+    {
+      "node_name": "ec2-18-223-239-195",
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "subscribe": true,
+          "publish": true
+        }
+      ]
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Cluster successfully configured."
+}
+```
+
+---
+
+## Purge Stream
+
+Purges messages from a stream.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `purge_stream`
+- database _(required)_ - the name of the database where the stream's table resides
+- table _(required)_ - the name of the table that belongs to the stream
+- options _(optional)_ - control how many messages get purged.
Options are:
+  - `keep` - purge will keep this many of the most recent messages
+  - `seq` - purge all messages up to, but not including, this sequence
+
+### Body
+
+```json
+{
+  "operation": "purge_stream",
+  "database": "dev",
+  "table": "dog",
+  "options": {
+    "keep": 100
+  }
+}
+```
+
+---
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/clustering.md b/site/versioned_docs/version-4.6/developers/operations-api/clustering.md new file mode 100644 index 00000000..d6f1f06f --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/clustering.md @@ -0,0 +1,356 @@
+---
+title: Clustering
+---
+
+# Clustering
+
+The following operations are available for configuring and managing [Harper replication](../replication/).
+
+_**If you are using NATS for clustering, please see the**_ [_**NATS Clustering Operations**_](./clustering-nats) _**documentation.**_
+
+## Add Node
+
+Adds a new Harper instance to the cluster. If `subscriptions` are provided, it will also create the replication relationships between the nodes. If they are not provided, a fully replicating system will be created. [Learn more about adding nodes here](../replication/).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `add_node`
+- hostname or url _(required)_ - you must provide either the `hostname` or the `url` of the node you want to add
+- verify_tls _(optional)_ - a boolean which determines if the TLS certificate should be verified. Setting this to `false` allows the Harper default self-signed certificates to be accepted. Defaults to `true`
+- authorization _(optional)_ - an object or a string which contains the authorization information for the node being added. If it is an object, it should contain `username` and `password` fields. If it is a string, it should use HTTP `Authorization` style credentials
+- retain_authorization _(optional)_ - a boolean which determines if the authorization credentials should be retained/stored and used every time a connection is made to this node. If `true`, the authorization will be stored on the node record. Generally this should not be used, as mTLS/certificate-based authorization is much more secure and safe, and avoids the need for storing credentials. Defaults to `false`.
+- revoked_certificates _(optional)_ - an array of revoked certificate serial numbers. If a certificate is revoked, it will not be accepted for any connections.
+- shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding.
+- subscriptions _(optional)_ - The relationship created between nodes. If not provided, a fully replicated cluster will be set up. Must be an object array and include `database`, `table`, `subscribe` and `publish`:
+  - database - the database to replicate
+  - table - the table to replicate
+  - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+  - publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+
+### Body
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "verify_tls": false,
+  "authorization": {
+    "username": "admin",
+    "password": "password"
+  }
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully added 'server-two' to cluster"
+}
+```
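+
+The same node can instead be added by `url` with string-style credentials, per the `authorization` description above. A minimal sketch; the URL and the base64 value (of `admin:password`) are illustrative:
+
+```json
+{
+  "operation": "add_node",
+  "url": "wss://server-two:9925",
+  "verify_tls": false,
+  "authorization": "Basic YWRtaW46cGFzc3dvcmQ="
+}
+```
+
+---
+
+## Update Node
+
+Modifies an existing Harper instance in the cluster.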
+
+_Operation is restricted to super_user roles only_
+
+_Note: will attempt to add the node if it does not exist_
+
+- operation _(required)_ - must always be `update_node`
+- hostname _(required)_ - the `hostname` of the remote node you are updating
+- revoked_certificates _(optional)_ - an array of revoked certificate serial numbers. If a certificate is revoked, it will not be accepted for any connections.
+- shard _(optional)_ - a number which can be used to indicate which shard this node belongs to. This is only needed if you are using sharding.
+- subscriptions _(required)_ - The relationship created between nodes. Must be an object array and include `database`, `table`, `subscribe` and `publish`:
+  - database - the database to replicate from
+  - table - the table to replicate from
+  - subscribe - a boolean which determines if transactions on the remote table should be replicated on the local table
+  - publish - a boolean which determines if transactions on the local table should be replicated on the remote table
+
+### Body
+
+```json
+{
+  "operation": "update_node",
+  "hostname": "server-two",
+  "subscriptions": [
+    {
+      "database": "dev",
+      "table": "my-table",
+      "subscribe": true,
+      "publish": true
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully updated 'server-two'"
+}
+```
+
+---
+
+## Remove Node
+
+Removes a Harper node from the cluster and stops replication. [Learn more about removing nodes here](../replication/).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `remove_node`
+- hostname _(required)_ - the `hostname` of the node you are removing
+
+### Body
+
+```json
+{
+  "operation": "remove_node",
+  "hostname": "server-two"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully removed 'server-two' from cluster"
+}
+```
+
+---
+
+## Cluster Status
+
+Returns an array of status objects from a cluster.
+
+`database_sockets` shows the actual websocket connections that exist between nodes.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `cluster_status`
+
+### Body
+
+```json
+{
+  "operation": "cluster_status"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "type": "cluster-status",
+  "connections": [
+    {
+      "replicateByDefault": true,
+      "replicates": true,
+      "url": "wss://server-2.domain.com:9933",
+      "name": "server-2.domain.com",
+      "subscriptions": null,
+      "database_sockets": [
+        {
+          "database": "data",
+          "connected": true,
+          "latency": 0.70,
+          "thread_id": 1,
+          "nodes": ["server-2.domain.com"],
+          "lastCommitConfirmed": "Wed, 12 Feb 2025 19:09:34 GMT",
+          "lastReceivedRemoteTime": "Wed, 12 Feb 2025 16:49:29 GMT",
+          "lastReceivedLocalTime": "Wed, 12 Feb 2025 16:50:59 GMT",
+          "lastSendTime": "Wed, 12 Feb 2025 16:50:59 GMT"
+        }
+      ]
+    }
+  ],
+  "node_name": "server-1.domain.com",
+  "is_enabled": true
+}
+```
+
+There is a separate socket for each database for each node. Each node is represented in the connections array, and each database connection to that node is represented in the `database_sockets` array. Additional timing statistics include:
+
+- `lastCommitConfirmed`: When a commit is sent out, it should receive a confirmation from the remote server; this is the last receipt of confirmation of an outgoing commit.
+- `lastReceivedRemoteTime`: This is the timestamp of the transaction that was last received. The timestamp is from when the original transaction occurred.
+- `lastReceivedLocalTime`: This is the local time when the last transaction was received. If there is a difference between this and `lastReceivedRemoteTime`, it means there is a delay between the original transaction and receiving it, so the node is probably catching up/behind.
+- `sendingMessage`: The timestamp of the transaction that is actively being sent. This won't exist if the replicator is waiting for the next transaction to send.
+
+---
+
+## Configure Cluster
+
+Bulk create/remove subscriptions for any number of remote nodes. Resets and replaces any existing clustering setup.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `configure_cluster`
+- connections _(required)_ - must be an object array with each object following the `add_node` schema.
+
+### Body
+
+```json
+{
+  "operation": "configure_cluster",
+  "connections": [
+    {
+      "hostname": "server-two",
+      "verify_tls": false,
+      "authorization": {
+        "username": "admin",
+        "password": "password2"
+      },
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "my-table",
+          "subscribe": true,
+          "publish": false
+        }
+      ]
+    },
+    {
+      "hostname": "server-three",
+      "verify_tls": false,
+      "authorization": {
+        "username": "admin",
+        "password": "password3"
+      },
+      "subscriptions": [
+        {
+          "schema": "dev",
+          "table": "dog",
+          "subscribe": true,
+          "publish": true
+        }
+      ]
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Cluster successfully configured."
+}
+```
+
+---
+
+## Cluster Set Routes
+
+Adds a route/routes to the `replication.routes` configuration. This operation behaves as a PATCH/upsert, meaning it will add new routes to the configuration while leaving existing routes untouched.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `cluster_set_routes`
+- routes _(required)_ - the routes field is an array that specifies the routes for clustering. Each element in the array can be either a string or an object with `hostname` and `port` properties.
+
+### Body
+
+```json
+{
+  "operation": "cluster_set_routes",
+  "routes": [
+    "wss://server-two:9925",
+    {
+      "hostname": "server-three",
+      "port": 9930
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "cluster routes successfully set",
+  "set": [
+    "wss://server-two:9925",
+    {
+      "hostname": "server-three",
+      "port": 9930
+    }
+  ],
+  "skipped": []
+}
+```
+
+---
+
+## Cluster Get Routes
+
+Gets the replication routes from the Harper config file.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `cluster_get_routes`
+
+### Body
+
+```json
+{
+  "operation": "cluster_get_routes"
+}
+```
+
+### Response: 200
+
+```json
+[
+  "wss://server-two:9925",
+  {
+    "hostname": "server-three",
+    "port": 9930
+  }
+]
+```
+
+---
+
+## Cluster Delete Routes
+
+Removes route(s) from the Harper config file. Returns a deletion success message and arrays of deleted and skipped records.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `cluster_delete_routes`
+- routes _(required)_ - must be an array of route object(s)
+
+### Body
+
+```json
+{
+  "operation": "cluster_delete_routes",
+  "routes": [
+    {
+      "hostname": "server-three",
+      "port": 9930
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "cluster routes successfully deleted",
+  "deleted": [
+    {
+      "hostname": "server-three",
+      "port": 9930
+    }
+  ],
+  "skipped": []
+}
+```
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/components.md b/site/versioned_docs/version-4.6/developers/operations-api/components.md new file mode 100644 index 00000000..d02b0fff --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/components.md @@ -0,0 +1,550 @@
+---
+title: Components
+---
+
+# Components
+
+## Add Component
+
+Creates a new component project in the component root directory using a predefined template.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `add_component`
+- project _(required)_ - the name of the project you wish to create
+- replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "add_component",
+  "project": "my-component"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully added project: my-component"
+}
+```
+
+---
+
+## Deploy Component
+
+Deploys a component using either a base64-encoded string representation of a `.tar` file (the output from `package_component`) or a package value, which can be any valid NPM reference, such as a GitHub repo, an NPM package, a tarball, a local directory or a website.
+
+If deploying with the `payload` option, Harper will decode the base64-encoded string, reconstitute the .tar file of your project folder, and extract it to the component root project directory.
+
+If deploying with the `package` option, the package value will be written to `harperdb-config.yaml`. Then `npm install` will be utilized to install the component in the `node_modules` directory located in the hdb root. The value is a package reference, which should generally be a [URL reference, as described here](https://docs.npmjs.com/cli/v10/configuring-npm/package-json#urls-as-dependencies) (it is also possible to include NPM-registered packages and file paths). URL package references can directly reference tarballs that can be installed as a package. However, the most common and recommended usage is to install from a Git repository, which can be combined with a tag to deploy a specific version directly from versioned source control. When using tags, we highly recommend that you use the `semver` directive to ensure consistent and reliable installation by NPM. In addition to tags, you can also reference branches or commit numbers.
Here is an example URL package reference to a (public) Git repository that doesn't require authentication:
+
+```
+https://github.com/HarperDB/application-template#semver:v1.0.0
+```
+
+or this can be shortened to:
+
+```
+HarperDB/application-template#semver:v1.0.0
+```
+
+You can also install from a private repository if you have SSH keys installed on the server:
+
+```
+git+ssh://git@github.com:my-org/my-app.git#semver:v1.0.0
+```
+
+Or you can use a GitHub token:
+
+```
+https://<token>@github.com/my-org/my-app#semver:v1.0.0
+```
+
+Or you can use a GitLab Project Access Token:
+
+```
+https://my-project:<token>@gitlab.com/my-group/my-project#semver:v1.0.0
+```
+
+Note that your component will be installed by NPM. If your component has dependencies, NPM will attempt to download and install these as well. NPM normally uses the public registry.npmjs.org registry. If you are installing without network access to this, you may wish to define [custom registry locations](https://docs.npmjs.com/cli/v8/configuring-npm/npmrc) if you have any dependencies that need to be installed. NPM will install the deployed component and any dependencies in node_modules in the hdb root directory (typically `~/hdb/node_modules`).
+
+_Note: After deploying a component, a restart may be required_
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `deploy_component`
+- project _(required)_ - the name of the project you wish to deploy
+- package _(optional)_ - this can be any valid GitHub or NPM reference
+- payload _(optional)_ - a base64-encoded string representation of the .tar file. Must be a string
+- restart _(optional)_ - must be either a boolean or the string `rolling`. If set to `rolling`, a rolling restart will be triggered after the component is deployed, meaning that each node in the cluster will be sequentially restarted (waiting for the last restart to start the next). If set to `true`, the restart will not be rolling; all nodes will be restarted in parallel. If `replicated` is `true`, the restart operations will be replicated across the cluster.
+- replicated _(optional)_ - if true, Harper will replicate the component to all nodes in the cluster. Must be a boolean.
+- install_command _(optional)_ - a command to use when installing the component. Must be a string. This can be used to install dependencies with pnpm or yarn, for example, like: `"install_command": "npm install -g pnpm && pnpm install"`
+
+### Body
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+```json
+{
+  "operation": "deploy_component",
+  "project": "my-component",
+  "package": "HarperDB/application-template",
+  "replicated": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deployed: my-component"
+}
+```
+
+---
+
+## Package Component
+
+Creates a temporary `.tar` file of the specified project folder, then reads it into a base64-encoded string and returns an object containing the project name and the base64 payload.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `package_component`
+- project _(required)_ - the name of the project you wish to package
+- skip_node_modules _(optional)_ - if true, sets an option for the tar module that will exclude the project's node_modules directory.
Must be a boolean
+
+### Body
+
+```json
+{
+  "operation": "package_component",
+  "project": "my-component",
+  "skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "project": "my-component",
+  "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
+}
+```
+
+---
+
+## Drop Component
+
+Deletes a file from inside the component project or deletes the complete project.
+
+**If just `project` is provided, it will delete all of that project's local files and folders.**
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `drop_component`
+- project _(required)_ - the name of the project you wish to delete or to delete from if using the `file` parameter
+- file _(optional)_ - the path relative to your project folder of the file you wish to delete
+- replicated _(optional)_ - if true, Harper will replicate the component deletion to all nodes in the cluster. Must be a boolean.
+- restart _(optional)_ - if true, Harper will restart after dropping the component. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "drop_component",
+  "project": "my-component",
+  "file": "utils/myUtils.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully dropped: my-component/utils/myUtils.js"
+}
+```
+
+---
+
+## Get Components
+
+Gets all local component files and folders and any component config from `harperdb-config.yaml`.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_components`
+
+### Body
+
+```json
+{
+  "operation": "get_components"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "name": "components",
+  "entries": [
+    {
+      "package": "HarperDB/application-template",
+      "name": "deploy-test-gh"
+    },
+    {
+      "package": "@fastify/compress",
+      "name": "fast-compress"
+    },
+    {
+      "name": "my-component",
+      "entries": [
+        {
+          "name": "LICENSE",
+          "mtime": "2023-08-22T16:00:40.286Z",
+          "size": 1070
+        },
+        {
+          "name": "index.md",
+          "mtime": "2023-08-22T16:00:40.287Z",
+          "size": 1207
+        },
+        {
+          "name": "config.yaml",
+          "mtime": "2023-08-22T16:00:40.287Z",
+          "size": 1069
+        },
+        {
+          "name": "package.json",
+          "mtime": "2023-08-22T16:00:40.288Z",
+          "size": 145
+        },
+        {
+          "name": "resources.js",
+          "mtime": "2023-08-22T16:00:40.289Z",
+          "size": 583
+        },
+        {
+          "name": "schema.graphql",
+          "mtime": "2023-08-22T16:00:40.289Z",
+          "size": 466
+        },
+        {
+          "name": "utils",
+          "entries": [
+            {
+              "name": "commonUtils.js",
+              "mtime": "2023-08-22T16:00:40.289Z",
+              "size": 583
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+```
+
+---
+
+## Get Component File
+
+Gets the contents of a file inside a component project.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_component_file`
+- project _(required)_ - the name of the project where the file is located
+- file _(required)_ - the path relative to your project folder of the file you wish to view
+- encoding _(optional)_ - the encoding that will be passed to the read file call.
Defaults to `utf8`
+
+### Body
+
+```json
+{
+  "operation": "get_component_file",
+  "project": "my-component",
+  "file": "resources.js"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "/**export class MyCustomResource extends tables.TableName {\n\t// we can define our own custom POST handler\n\tpost(content) {\n\t\t// do something with the incoming content;\n\t\treturn super.post(content);\n\t}\n\t// or custom GET handler\n\tget() {\n\t\t// we can modify this resource before returning\n\t\treturn super.get();\n\t}\n}\n */\n// we can also define a custom resource without a specific table\nexport class Greeting extends Resource {\n\t// a \"Hello, world!\" handler\n\tget() {\n\t\treturn { greeting: 'Hello, world!' };\n\t}\n}"
+}
+```
+
+---
+
+## Set Component File
+
+Creates or updates a file inside a component project.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `set_component_file`
+- project _(required)_ - the name of the project the file is located in
+- file _(required)_ - the path relative to your project folder of the file you wish to set
+- payload _(required)_ - what will be written to the file
+- encoding _(optional)_ - the encoding that will be passed to the write file call. Defaults to `utf8`
+- replicated _(optional)_ - if true, Harper will replicate the component update to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "set_component_file",
+  "project": "my-component",
+  "file": "test.js",
+  "payload": "console.log('hello world')"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully set component: test.js"
+}
+```
+
+---
+
+## Add SSH Key
+
+Adds an SSH key for deploying components from private repositories. This will also create an ssh config file that will be used when deploying the components.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `add_ssh_key`
+- name _(required)_ - the name of the key
+- key _(required)_ - the private key contents. Must be an ed25519 key. Line breaks must be delimited with `\n` and have a trailing `\n`
+- host _(required)_ - the host for the ssh config (see below). Used as part of the `package` url when deploying a component using this key
+- hostname _(required)_ - the hostname for the ssh config (see below). Used to map `host` to an actual domain (e.g. `github.com`)
+- known_hosts _(optional)_ - the public SSH keys of the host your component will be retrieved from. If `hostname` is `github.com` this will be retrieved automatically. Line breaks must be delimited with `\n`
+- replicated _(optional)_ - if true, Harper will replicate the key to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "add_ssh_key",
+  "name": "harperdb-private-component",
+  "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nfake\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
+  "host": "harperdb-private-component.github.com",
+  "hostname": "github.com"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Added ssh key: harperdb-private-component"
+}
+```
+
+### Generated Config and Deploy Component "package" string examples
+
+```
+#harperdb-private-component
+Host harperdb-private-component.github.com
+  HostName github.com
+  User git
+  IdentityFile /hdbroot/ssh/harperdb-private-component.key
+  IdentitiesOnly yes
+```
+
+```
+"package": "git+ssh://git@<host>:<owner>/<repo>.git#semver:v1.2.3"
+
+"package": "git+ssh://git@harperdb-private-component.github.com:HarperDB/harperdb-private-component.git#semver:v1.2.3"
+```
+
+Note that `deploy_component` with a package uses `npm install`, so the URL must be a valid npm-format URL. The above is an example of a URL using a tag in the repo to install.
+
+---
+
+## Update SSH Key
+
+Updates the private key contents of an existing SSH key.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `update_ssh_key`
+- name _(required)_ - the name of the key to be updated
+- key _(required)_ - the private key contents. Must be an ed25519 key. Line breaks must be delimited with `\n` and have a trailing `\n`
+- replicated _(optional)_ - if true, Harper will replicate the key update to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "update_ssh_key",
+  "name": "harperdb-private-component",
+  "key": "-----BEGIN OPENSSH PRIVATE KEY-----\nthis\nis\na\nNEWFAKE\nkey\n-----END OPENSSH PRIVATE KEY-----\n",
+  "host": "harperdb-private-component.github.com",
+  "hostname": "github.com"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Updated ssh key: harperdb-private-component"
+}
+```
+
+---
+
+## Delete SSH Key
+
+Deletes an SSH key. This will also remove it from the generated SSH config.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `delete_ssh_key`
+- name _(required)_ - the name of the key to be deleted
+- replicated _(optional)_ - if true, Harper will replicate the key deletion to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "delete_ssh_key",
+  "name": "harperdb-private-component"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Deleted ssh key: harperdb-private-component"
+}
+```
+
+---
+
+## List SSH Keys
+
+Lists the names of added SSH keys.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `list_ssh_keys`
+
+### Body
+
+```json
+{
+  "operation": "list_ssh_keys"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "name": "harperdb-private-component"
+  },
+  ...
+]
+```
+
+---
+
+## Set SSH Known Hosts
+
+Sets the SSH known_hosts file. This will overwrite the file.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `set_ssh_known_hosts`
+- known_hosts _(required)_ - the contents to set the known_hosts to. Line breaks must be delimited with `\n` (one way to gather host keys is shown in the sketch below)
+- replicated _(optional)_ - if true, Harper will replicate the known hosts to all nodes in the cluster. Must be a boolean.
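+
+One way to gather known_hosts content is OpenSSH's standard `ssh-keyscan` tool, whose output lines match the format shown in the body below (join them with `\n` in the JSON payload). A sketch; the target host is illustrative:
+
+```bash
+# Print the public host keys for github.com in known_hosts format
+ssh-keyscan github.com
+```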
+
+### Body
+
+```json
+{
+  "operation": "set_ssh_known_hosts",
+  "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Known hosts successfully set"
+}
+```
+
+---
+
+## Get SSH Known Hosts
+
+Gets the contents of the known_hosts file.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_ssh_known_hosts`
+
+### Body
+
+```json
+{
+  "operation": "get_ssh_known_hosts"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "known_hosts": "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=\n"
+}
+```
+
+---
+
+## Install Node Modules
+
+This operation is deprecated, as it is handled automatically by `deploy_component` and `restart`.
+Executes npm install against specified custom function projects.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `install_node_modules`
+- projects _(required)_ - must be an array of custom function projects
+- dry_run _(optional)_ - refers to the npm --dry-run flag: [https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run](https://docs.npmjs.com/cli/v8/commands/npm-install#dry-run). Defaults to false.
+
+### Body
+
+```json
+{
+  "operation": "install_node_modules",
+  "projects": ["dogs", "cats"],
+  "dry_run": true
+}
+```
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/configuration.md b/site/versioned_docs/version-4.6/developers/operations-api/configuration.md new file mode 100644 index 00000000..c48381ab --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/configuration.md @@ -0,0 +1,135 @@
+---
+title: Configuration
+---
+
+# Configuration
+
+## Set Configuration
+
+Modifies the Harper configuration file parameters. Must be followed by a `restart` or `restart_service` operation for changes to take effect.
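+
+For example, once the `set_configuration` request shown below succeeds, a minimal follow-up call can trigger the required restart (the `restart` operation is covered under System Operations):
+
+```json
+{
+  "operation": "restart"
+}
+```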
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `set_configuration`
+- logging_level _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+- clustering_enabled _(example/optional)_ - one or more configuration keywords to be updated in the Harper configuration file
+
+### Body
+
+```json
+{
+  "operation": "set_configuration",
+  "logging_level": "trace",
+  "clustering_enabled": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Configuration successfully set. You must restart HarperDB for new config settings to take effect."
+}
+```
+
+---
+
+## Get Configuration
+
+Returns the Harper configuration parameters.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_configuration`
+
+### Body
+
+```json
+{
+  "operation": "get_configuration"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "http": {
+    "compressionThreshold": 1200,
+    "cors": false,
+    "corsAccessList": [null],
+    "keepAliveTimeout": 30000,
+    "port": 9926,
+    "securePort": null,
+    "timeout": 120000
+  },
+  "threads": 11,
+  "authentication": {
+    "cacheTTL": 30000,
+    "enableSessions": true,
+    "operationTokenTimeout": "1d",
+    "refreshTokenTimeout": "30d"
+  },
+  "analytics": {
+    "aggregatePeriod": 60
+  },
+  "replication": {
+    "hostname": "node1",
+    "databases": "*",
+    "routes": null,
+    "url": "wss://127.0.0.1:9925"
+  },
+  "componentsRoot": "/Users/hdb/components",
+  "localStudio": {
+    "enabled": false
+  },
+  "logging": {
+    "auditAuthEvents": {
+      "logFailed": false,
+      "logSuccessful": false
+    },
+    "auditLog": true,
+    "auditRetention": "3d",
+    "file": true,
+    "level": "error",
+    "root": "/Users/hdb/log",
+    "rotation": {
+      "enabled": false,
+      "compress": false,
+      "interval": null,
+      "maxSize": null,
+      "path": "/Users/hdb/log"
+    },
+    "stdStreams": false
+  },
+  "mqtt": {
+    "network": {
+      "port": 1883,
+      "securePort": 8883
+    },
+    "webSocket": true,
+    "requireAuthentication": true
+  },
+  "operationsApi": {
+    "network": {
+      "cors": true,
+      "corsAccessList": ["*"],
+      "domainSocket": "/Users/hdb/operations-server",
+      "port": 9925,
+      "securePort": null
+    }
+  },
+  "rootPath": "/Users/hdb",
+  "storage": {
+    "writeAsync": false,
+    "caching": true,
+    "compression": false,
+    "noReadAhead": true,
+    "path": "/Users/hdb/database",
+    "prefetchWrites": true
+  },
+  "tls": {
+    "privateKey": "/Users/hdb/keys/privateKey.pem"
+  }
+}
```
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/custom-functions.md b/site/versioned_docs/version-4.6/developers/operations-api/custom-functions.md new file mode 100644 index 00000000..ed31785a --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/custom-functions.md @@ -0,0 +1,279 @@
+---
+title: Custom Functions
+---
+
+# Custom Functions
+
+_These operations are deprecated._
+
+## Custom Functions Status
+
+Returns the state of the Custom Functions server. This includes whether it is enabled, upon which port it is listening, and where its root project directory is located on the host machine.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `custom_functions_status`
+
+### Body
+
+```json
+{
+  "operation": "custom_functions_status"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "is_enabled": true,
+  "port": 9926,
+  "directory": "/Users/myuser/hdb/custom_functions"
+}
+```
+
+---
+
+## Get Custom Functions
+
+Returns an array of projects within the Custom Functions root project directory. Each project has details including each of the files in the routes and helpers directories, and the total file count in the static folder.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_custom_functions`
+
+### Body
+
+```json
+{
+  "operation": "get_custom_functions"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "dogs": {
+    "routes": ["examples"],
+    "helpers": ["example"],
+    "static": 3
+  }
+}
+```
+
+---
+
+## Get Custom Function
+
+Returns the content of the specified file as text. Harper Studio uses this call to render the file content in its built-in code editor.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_custom_function`
+- project _(required)_ - the name of the project containing the file for which you wish to get content
+- type _(required)_ - the name of the sub-folder containing the file for which you wish to get content - must be either routes or helpers
+- file _(required)_ - the name of the file for which you wish to get content - should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+  "operation": "get_custom_function",
+  "project": "dogs",
+  "type": "helpers",
+  "file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n  return new Promise((resolve, reject) => {\n    const req = https.request(options, (res) => {\n      res.setEncoding('utf8');\n      let responseBody = '';\n\n      res.on('data', (chunk) => {\n        responseBody += chunk;\n      });\n\n      res.on('end', () => {\n        resolve(JSON.parse(responseBody));\n      });\n    });\n\n    req.on('error', (err) => {\n      reject(err);\n    });\n\n    req.end();\n  });\n};\n\nconst customValidation = async (request,logger) => {\n  const options = {\n    hostname: 'jsonplaceholder.typicode.com',\n    port: 443,\n    path: '/todos/1',\n    method: 'GET',\n    headers: { authorization: request.headers.authorization },\n  };\n\n  const result = await authRequest(options);\n\n  /*\n   * throw an authentication error based on the response body or statusCode\n   */\n  if (result.error) {\n    const errorString = result.error || 'Sorry, there was an error authenticating your request';\n    logger.error(errorString);\n    throw new Error(errorString);\n  }\n  return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+---
+
+## Set Custom Function
+
+Updates the content of the specified file. Harper Studio uses this call to save any changes made through its built-in code editor.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `set_custom_function`
+- project _(required)_ - the name of the project containing the file for which you wish to set content
+- type _(required)_ - the name of the sub-folder containing the file for which you wish to set content - must be either routes or helpers
+- file _(required)_ - the name of the file for which you wish to set content - should not include the file extension (which is always .js)
+- function_content _(required)_ - the content you wish to save into the specified file
+
+### Body
+
+```json
+{
+  "operation": "set_custom_function",
+  "project": "dogs",
+  "type": "helpers",
+  "file": "example",
+  "function_content": "'use strict';\n\nconst https = require('https');\n\nconst authRequest = (options) => {\n  return new Promise((resolve, reject) => {\n    const req = https.request(options, (res) => {\n      res.setEncoding('utf8');\n      let responseBody = '';\n\n      res.on('data', (chunk) => {\n        responseBody += chunk;\n      });\n\n      res.on('end', () => {\n        resolve(JSON.parse(responseBody));\n      });\n    });\n\n    req.on('error', (err) => {\n      reject(err);\n    });\n\n    req.end();\n  });\n};\n\nconst customValidation = async (request,logger) => {\n  const options = {\n    hostname: 'jsonplaceholder.typicode.com',\n    port: 443,\n    path: '/todos/1',\n    method: 'GET',\n    headers: { authorization: request.headers.authorization },\n  };\n\n  const result = await authRequest(options);\n\n  /*\n   * throw an authentication error based on the response body or statusCode\n   */\n  if (result.error) {\n    const errorString = result.error || 'Sorry, there was an error authenticating your request';\n    logger.error(errorString);\n    throw new Error(errorString);\n  }\n  return request;\n};\n\nmodule.exports = customValidation;\n"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully updated custom function: example.js"
+}
+```
+
+---
+
+## Drop Custom Function
+
+Deletes the specified file.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `drop_custom_function`
+- project _(required)_ - the name of the project containing the file you wish to delete
+- type _(required)_ - the name of the sub-folder containing the file you wish to delete. Must be either routes or helpers
+- file _(required)_ - the name of the file you wish to delete. Should not include the file extension (which is always .js)
+
+### Body
+
+```json
+{
+  "operation": "drop_custom_function",
+  "project": "dogs",
+  "type": "helpers",
+  "file": "example"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deleted custom function: example.js"
+}
+```
+
+---
+
+## Add Custom Function Project
+
+Creates a new project folder in the Custom Functions root project directory. It also inserts into the new directory the contents of our Custom Functions Project template, which is available publicly here: https://github.com/HarperDB/harperdb-custom-functions-template.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `add_custom_function_project`
+- project _(required)_ - the name of the project you wish to create
+
+### Body
+
+```json
+{
+  "operation": "add_custom_function_project",
+  "project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully created custom function project: dogs"
+}
+```
+
+---
+
+## Drop Custom Function Project
+
+Deletes the specified project folder and all of its contents.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `drop_custom_function_project`
+- project _(required)_ - the name of the project you wish to delete
+
+### Body
+
+```json
+{
+  "operation": "drop_custom_function_project",
+  "project": "dogs"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deleted project: dogs"
+}
+```
+
+---
+
+## Package Custom Function Project
+
+Creates a .tar file of the specified project folder, then reads it into a base64-encoded string and returns an object with the project name, the payload and the path to the temporary file.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `package_custom_function_project`
+- project _(required)_ - the name of the project you wish to package up for deployment
+- skip_node_modules _(optional)_ - if true, sets an option for the tar module that will exclude the project's node_modules directory. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "package_custom_function_project",
+  "project": "dogs",
+  "skip_node_modules": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "project": "dogs",
+  "payload": "LgAAAAAAAAAAAAAAAAAAA...AAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
+  "file": "/tmp/d27f1154-5d82-43f0-a5fb-a3018f366081.tar"
+}
+```
+
+---
+
+## Deploy Custom Function Project
+
+Takes the output of package_custom_function_project, decodes the base64-encoded string, reconstitutes the .tar file of your project folder, and extracts it to the Custom Functions root project directory.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `deploy_custom_function_project`
+- project _(required)_ - the name of the project you wish to deploy. Must be a string
+- payload _(required)_ - a base64-encoded string representation of the .tar file. Must be a string
+
+### Body
+
+```json
+{
+  "operation": "deploy_custom_function_project",
+  "project": "dogs",
+  "payload": "A very large base64-encoded string representation of the .tar file"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Successfully deployed project: dogs"
+}
```
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/databases-and-tables.md b/site/versioned_docs/version-4.6/developers/operations-api/databases-and-tables.md new file mode 100644 index 00000000..eea77222 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/databases-and-tables.md @@ -0,0 +1,388 @@
+---
+title: Databases and Tables
+---
+
+# Databases and Tables
+
+## Describe All
+
+Returns the definitions of all databases and tables within the instance. Record counts above 5000 records are estimated, as determining the exact count can be expensive. When the record count is estimated, this is indicated by the inclusion of a confidence interval of `estimated_record_range`. If you need the exact count, you can include an `"exact_count": true` in the operation, but be aware that this requires a full table scan (may be expensive).
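+
+For example, to request the exact count despite the full-table-scan cost, the flag from the paragraph above can be included directly in the operation:
+
+```json
+{
+  "operation": "describe_all",
+  "exact_count": true
+}
+```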
+
+- operation _(required)_ - must always be `describe_all`
+
+### Body
+
+```json
+{
+  "operation": "describe_all"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "dev": {
+    "dog": {
+      "schema": "dev",
+      "name": "dog",
+      "hash_attribute": "id",
+      "audit": true,
+      "schema_defined": false,
+      "attributes": [
+        {
+          "attribute": "id",
+          "indexed": true,
+          "is_primary_key": true
+        },
+        {
+          "attribute": "__createdtime__",
+          "indexed": true
+        },
+        {
+          "attribute": "__updatedtime__",
+          "indexed": true
+        },
+        {
+          "attribute": "type",
+          "indexed": true
+        }
+      ],
+      "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+      "record_count": 4000,
+      "estimated_record_range": [3976, 4033],
+      "last_updated_record": 1697658683698.4504
+    }
+  }
+}
+```
+
+---
+
+## Describe Database
+
+Returns the definitions of all tables within the specified database.
+
+- operation _(required)_ - must always be `describe_database`
+- database _(optional)_ - the database you wish to describe. The default is `data`
+
+### Body
+
+```json
+{
+  "operation": "describe_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "dog": {
+    "schema": "dev",
+    "name": "dog",
+    "hash_attribute": "id",
+    "audit": true,
+    "schema_defined": false,
+    "attributes": [
+      {
+        "attribute": "id",
+        "indexed": true,
+        "is_primary_key": true
+      },
+      {
+        "attribute": "__createdtime__",
+        "indexed": true
+      },
+      {
+        "attribute": "__updatedtime__",
+        "indexed": true
+      },
+      {
+        "attribute": "type",
+        "indexed": true
+      }
+    ],
+    "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+    "record_count": 4000,
+    "estimated_record_range": [3976, 4033],
+    "last_updated_record": 1697658683698.4504
+  }
+}
+```
+
+---
+
+## Describe Table
+
+Returns the definition of the specified table.
+
+- operation _(required)_ - must always be `describe_table`
+- table _(required)_ - table you wish to describe
+- database _(optional)_ - database where the table you wish to describe lives. The default is `data`
+
+### Body
+
+```json
+{
+  "operation": "describe_table",
+  "table": "dog"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "schema": "dev",
+  "name": "dog",
+  "hash_attribute": "id",
+  "audit": true,
+  "schema_defined": false,
+  "attributes": [
+    {
+      "attribute": "id",
+      "indexed": true,
+      "is_primary_key": true
+    },
+    {
+      "attribute": "__createdtime__",
+      "indexed": true
+    },
+    {
+      "attribute": "__updatedtime__",
+      "indexed": true
+    },
+    {
+      "attribute": "type",
+      "indexed": true
+    }
+  ],
+  "clustering_stream_name": "dd9e90c2689151ab812e0f2d98816bff",
+  "record_count": 4000,
+  "estimated_record_range": [3976, 4033],
+  "last_updated_record": 1697658683698.4504
+}
+```
+
+---
+
+## Create Database
+
+Create a new database.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `create_database`
+- database _(optional)_ - name of the database you are creating. The default is `data`
+
+### Body
+
+```json
+{
+  "operation": "create_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "database 'dev' successfully created"
+}
+```
+
+---
+
+## Drop Database
+
+Drop an existing database. NOTE: Dropping a database will delete all tables and all of their records in that database.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - this should always be `drop_database`
+- database _(required)_ - name of the database you are dropping
+- replicated _(optional)_ - if true, Harper will replicate the drop to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "drop_database",
+  "database": "dev"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "successfully deleted 'dev'"
+}
+```
+
+---
+
+## Create Table
+
+Create a new table within a database.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `create_table`
+- database _(optional)_ - name of the database where you want your table to live. If the database does not exist, it will be created. If the `database` property is not provided it will default to `data`.
+- table _(required)_ - name of the table you are creating
+- primary_key _(required)_ - primary key for the table
+- attributes _(optional)_ - an array of attributes that specifies the schema for the table, that is, the set of attributes for the table. When attributes are supplied, the table will not be considered a "dynamic schema" table, and attributes will not be auto-added when records with new properties are inserted. Each attribute is specified as:
+  - name _(required)_ - the name of the attribute
+  - indexed _(optional)_ - indicates if the attribute should be indexed
+  - type _(optional)_ - specifies the data type of the attribute (can be String, Int, Float, Date, ID, Any)
+- expiration _(optional)_ - specifies the time-to-live or expiration of records in the table before they are evicted (records are not evicted on any timer if not specified). This is specified in seconds.
+
+### Body
+
+```json
+{
+  "operation": "create_table",
+  "database": "dev",
+  "table": "dog",
+  "primary_key": "id"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "table 'dev.dog' successfully created."
+}
+```
+
+---
+
+## Drop Table
+
+Drop an existing database table. NOTE: Dropping a table will delete all associated records in that table.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - this should always be `drop_table`
+- database _(optional)_ - database where the table you are dropping lives. The default is `data`
+- table _(required)_ - name of the table you are dropping
+- replicated _(optional)_ - if true, Harper will replicate the drop to all nodes in the cluster. Must be a boolean.
+
+### Body
+
+```json
+{
+  "operation": "drop_table",
+  "database": "dev",
+  "table": "dog"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "successfully deleted table 'dev.dog'"
+}
+```
+
+---
+
+## Create Attribute
+
+Create a new attribute within the specified table. **The create_attribute operation can be used by admins wishing to pre-define database values for setting role-based permissions or for any other reason.**
+
+_Note: Harper will automatically create new attributes on insert and update if they do not already exist within the database._
+
+- operation _(required)_ - must always be `create_attribute`
+- database _(optional)_ - name of the database of the table to which you want to add your attribute.
The default is `data`
+- table _(required)_ - name of the table where you want the attribute to live
+- attribute _(required)_ - name for the attribute
+
+### Body
+
+```json
+{
+  "operation": "create_attribute",
+  "database": "dev",
+  "table": "dog",
+  "attribute": "is_adorable"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "inserted 1 of 1 records",
+  "skipped_hashes": [],
+  "inserted_hashes": ["383c0bef-5781-4e1c-b5c8-987459ad0831"]
+}
+```
+
+---
+
+## Drop Attribute
+
+Drop an existing attribute from the specified table. NOTE: Dropping an attribute will delete all associated attribute values in that table.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - this should always be `drop_attribute`
+- database _(optional)_ - database where the table you are dropping lives. The default is `data`
+- table _(required)_ - table where the attribute you are dropping lives
+- attribute _(required)_ - attribute that you intend to drop
+
+### Body
+
+```json
+{
+  "operation": "drop_attribute",
+  "database": "dev",
+  "table": "dog",
+  "attribute": "is_adorable"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "successfully deleted attribute 'is_adorable'"
+}
+```
+
+---
+
+## Get Backup
+
+This will return a snapshot of the requested database. This provides a means for backing up the database through the operations API. The response will be the raw database file (in binary format), which can later be restored as a database file by copying into the appropriate hdb/databases directory (with Harper not running). The returned file is a snapshot of the database at the moment in time that the get_backup operation begins. This also supports backing up individual tables in a database. However, this is a more expensive operation than backing up a database in whole, and will lose any transactional atomicity between writes across tables, so generally it is recommended that you back up the entire database.
+
+It is important to note that trying to copy a database file that is in use (Harper actively running and writing to the file) using standard file copying tools is not safe (the copied file will likely be corrupt), which is why using this snapshot operation is recommended for backups (volume snapshots are also a good way to back up Harper databases).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - this should always be `get_backup`
+- database _(required)_ - this is the database that will be snapshotted and returned
+- table _(optional)_ - this will specify a specific table to back up
+- tables _(optional)_ - this will specify a specific set of tables to back up
+
+### Body
+
+```json
+{
+  "operation": "get_backup",
+  "database": "dev"
+}
+```
+
+### Response: 200
+
+```
+The database in raw binary data format
+```
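+
+Because the response is the raw database file rather than JSON, a typical invocation streams it straight to disk. A sketch with curl; the hostname, credentials, and output filename are illustrative:
+
+```bash
+# Save a snapshot of the 'dev' database to a local file
+curl --location --request POST 'https://my-harper-server:9925' \
+  --header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+  --header 'Content-Type: application/json' \
+  --data-raw '{"operation": "get_backup", "database": "dev"}' \
+  --output dev-backup.mdb
+```
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/index.md b/site/versioned_docs/version-4.6/developers/operations-api/index.md new file mode 100644 index 00000000..c1661ebe --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/index.md @@ -0,0 +1,55 @@
+---
+title: Operations API
+---
+
+# Operations API
+
+The operations API provides a full set of capabilities for configuring, deploying, administering, and controlling Harper. To send operations to the operations API, you send a POST request to the operations API endpoint, which [defaults to port 9925](../../deployments/configuration#operationsapi), on the root path, where the body is the operation object.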
These requests need to be authenticated, which can be done with [basic auth](../security/basic-auth) or [JWT authentication](../security/jwt-auth). For example, a request to create a table would be performed as:
+
+```http
+POST http://my-harperdb-server:9925/
+Authorization: Basic YourBase64EncodedInstanceUser:Pass
+Content-Type: application/json
+
+{
+  "operation": "create_table",
+  "table": "my-table"
+}
+```
+
+The operations API reference is available below and categorized by topic:
+
+* [Quick Start Examples](./quickstart-examples)
+* [Databases and Tables](./databases-and-tables)
+* [NoSQL Operations](./nosql-operations)
+* [Bulk Operations](./bulk-operations)
+* [Users and Roles](./users-and-roles)
+* [Clustering](./clustering)
+* [Clustering with NATS](./clustering-nats)
+* [Components](./components)
+* [Registration](./registration)
+* [Jobs](./jobs)
+* [Logs](./logs)
+* [System Operations](./system-operations)
+* [Configuration](./configuration)
+* [Certificate Management](./certificate-management)
+* [Token Authentication](./token-authentication)
+* [SQL Operations](./sql-operations)
+* [Advanced JSON SQL Examples](./advanced-json-sql-examples)
+* [Analytics](./analytics)
+* [Past Release API Documentation](https://olddocs.harperdb.io)
+
+## More Examples
+
+Here is an example of using `curl` to make an operations API request:
+
+```bash
+curl --location --request POST 'https://instance-subdomain.harperdbcloud.com' \
+--header 'Authorization: Basic YourBase64EncodedInstanceUser:Pass' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+"operation": "create_schema",
+"schema": "dev"
+}'
+```
 diff --git a/site/versioned_docs/version-4.6/developers/operations-api/jobs.md b/site/versioned_docs/version-4.6/developers/operations-api/jobs.md new file mode 100644 index 00000000..173125a1 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/jobs.md @@ -0,0 +1,87 @@
+---
+title: Jobs
+---
+
+# Jobs
+
+## Get Job
+
+Returns job status, metrics, and messages for the specified job ID.
+
+- operation _(required)_ - must always be `get_job`
+- id _(required)_ - the id of the job you wish to view
+
+### Body
+
+```json
+{
+  "operation": "get_job",
+  "id": "4a982782-929a-4507-8794-26dae1132def"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "__createdtime__": 1611615798782,
+    "__updatedtime__": 1611615801207,
+    "created_datetime": 1611615798774,
+    "end_datetime": 1611615801206,
+    "id": "4a982782-929a-4507-8794-26dae1132def",
+    "job_body": null,
+    "message": "successfully loaded 350 of 350 records",
+    "start_datetime": 1611615798805,
+    "status": "COMPLETE",
+    "type": "csv_url_load",
+    "user": "HDB_ADMIN",
+    "start_datetime_converted": "2021-01-25T23:03:18.805Z",
+    "end_datetime_converted": "2021-01-25T23:03:21.206Z"
+  }
+]
+```
+
+---
+
+## Search Jobs By Start Date
+
+Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
+
+---
+
+## Search Jobs By Start Date
+
+Returns a list of job statuses, metrics, and messages for all jobs executed within the specified time window.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `search_jobs_by_start_date`
+- from_date _(required)_ - the date you wish to start the search
+- to_date _(required)_ - the date you wish to end the search
+
+### Body
+
+```json
+{
+  "operation": "search_jobs_by_start_date",
+  "from_date": "2021-01-25T22:05:27.464+0000",
+  "to_date": "2021-01-25T23:05:27.464+0000"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "id": "942dd5cb-2368-48a5-8a10-8770ff7eb1f1",
+    "user": "HDB_ADMIN",
+    "type": "csv_url_load",
+    "status": "COMPLETE",
+    "start_datetime": 1611613284781,
+    "end_datetime": 1611613287204,
+    "job_body": null,
+    "message": "successfully loaded 350 of 350 records",
+    "created_datetime": 1611613284764,
+    "__createdtime__": 1611613284767,
+    "__updatedtime__": 1611613287207,
+    "start_datetime_converted": "2021-01-25T22:21:24.781Z",
+    "end_datetime_converted": "2021-01-25T22:21:27.204Z"
+  }
+]
+```
diff --git a/site/versioned_docs/version-4.6/developers/operations-api/logs.md b/site/versioned_docs/version-4.6/developers/operations-api/logs.md
new file mode 100644
index 00000000..17eba72f
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/operations-api/logs.md
@@ -0,0 +1,732 @@
+---
+title: Logs
+---
+
+# Logs
+
+## Read Harper Log
+
+Returns log outputs from the primary Harper log based on the provided search criteria. [Read more about Harper logging here](../../administration/logging/standard-logging#read-logs-via-the-api).
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `read_log`
+- start _(optional)_ - result to start with. Default is 0, the first log in `hdb.log`. Must be a number
+- limit _(optional)_ - number of results returned. Default is 1000. Must be a number
+- level _(optional)_ - error level to filter on. Default behavior is all levels. Must be `notify`, `error`, `warn`, `info`, `debug` or `trace`
+- from _(optional)_ - date to begin showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is the first log in `hdb.log`
+- until _(optional)_ - date to end showing log results. Must be `YYYY-MM-DD` or `YYYY-MM-DD hh:mm:ss`. Default is the last log in `hdb.log`
+- order _(optional)_ - order in which to display logs, `desc` or `asc`, by timestamp. By default, `hdb.log` order is maintained
+
+### Body
+
+```json
+{
+  "operation": "read_log",
+  "start": 0,
+  "limit": 1000,
+  "level": "error",
+  "from": "2021-01-25T22:05:27.464+0000",
+  "until": "2021-01-25T23:05:27.464+0000",
+  "order": "desc"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "level": "notify",
+    "message": "Connected to cluster server.",
+    "timestamp": "2021-01-25T23:03:20.710Z",
+    "thread": "main/0",
+    "tags": []
+  },
+  {
+    "level": "warn",
+    "message": "Login failed",
+    "timestamp": "2021-01-25T22:24:45.113Z",
+    "thread": "http/9",
+    "tags": []
+  },
+  {
+    "level": "error",
+    "message": "unknown attribute 'name and breed'",
+    "timestamp": "2021-01-25T22:23:24.167Z",
+    "thread": "http/9",
+    "tags": []
+  }
+]
+```
+
+---
+
+## Read Transaction Log
+
+Returns all transactions logged for the specified database table. You may filter your results with the optional from, to, and limit fields. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log).
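+
+The `from` and `to` filters shown below take millisecond-based epoch timestamps in UTC. One way to produce one, assuming GNU `date` is available:
+
+```bash
+# Current time as a millisecond epoch, e.g. 1660585656639
+date +%s%3N
+```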
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `read_transaction_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- from _(optional)_ - time format must be millisecond-based epoch in UTC +- to _(optional)_ - time format must be millisecond-based epoch in UTC +- limit _(optional)_ - max number of logs you want to receive. Must be a number + +### Body + +```json +{ + "operation": "read_transaction_log", + "schema": "dev", + "table": "dog", + "from": 1560249020865, + "to": 1660585656639, + "limit": 10 +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619736, + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38, + "__updatedtime__": 1660165619688, + "__createdtime__": 1660165619688 + } + ] + }, + { + "operation": "insert", + "user": "admin", + "timestamp": 1660165619813, + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true, + "__updatedtime__": 1660165619797, + "__createdtime__": 1660165619797 + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true, + "__updatedtime__": 1660165619798, + "__createdtime__": 1660165619798 + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true, + "__updatedtime__": 
1660165619798, + "__createdtime__": 1660165619798 + } + ] + }, + { + "operation": "update", + "user": "admin", + "timestamp": 1660165620040, + "records": [ + { + "id": 1, + "dog_name": "Penny B", + "__updatedtime__": 1660165620036 + } + ] + } +] +``` + +--- + +## Delete Transaction Logs Before + +Deletes transaction log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `delete_transaction_log_before` +- schema _(required)_ - schema under which the transaction log resides. Must be a string +- table _(required)_ - table under which the transaction log resides. Must be a string +- timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body + +```json +{ + "operation": "delete_transaction_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1598290282817 +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 26a6d3a6-6d77-40f9-bee7-8d6ef479a126" +} +``` + +--- + +## Read Audit Log + +AuditLog must be enabled in the Harper configuration file to make this request. Returns a verbose history of all transactions logged for the specified database table, including original data records. You may filter your results with the optional search_type and search_values fields. [Read more about Harper transaction logs here.](../../administration/logging/transaction-logging#read_transaction_log) + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - possibilities are `hash_value`, `timestamp` and `username` +- search_values _(optional)_ - an array of string or numbers relating to search_type + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog" +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [318], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [444], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [444], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [444], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + } +] +``` + +--- + +## Read Audit Log by timestamp + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table between the specified time window. [Read more about Harper transaction logs here](./logs#read-transaction-log). 
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - timestamp +- search_values _(optional)_ - an array containing a maximum of two values \[`from_timestamp`, `to_timestamp`] defining the range of transactions you would like to view. + - Timestamp format is millisecond-based epoch in UTC + - If no items are supplied then all transactions are returned + - If only one entry is supplied then all transactions after the supplied timestamp will be returned + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "timestamp", + "search_values": [1660585740558, 1660585759710.56] +} +``` + +### Response: 200 + +```json +[ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [318], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [444], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [444], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [444], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [318], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } +] +``` + +--- + +## Read Audit Log by username + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed by the specified user. [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - username +- search_values _(optional)_ - the Harper user for whom you would like to view transactions + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "username", + "search_values": ["admin"] +} +``` + +### Response: 200 + +```json +{ + "admin": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "hash_values": [318], + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585716133.01, + "hash_values": [444], + "records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660585740558.415, + "hash_values": [444], + "records": [ + { + "id": 444, + "fur_type": "coarse", + "__updatedtime__": 1660585740556 + } + ], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585716128, + "__createdtime__": 1660585716128 + } + ] + }, + { + "operation": "delete", + "user_name": "admin", + "timestamp": 1660585759710.56, + "hash_values": [444], + "original_records": [ + { + "id": 444, + "dog_name": "Davis", + "__updatedtime__": 1660585740556, + "__createdtime__": 1660585716128, + "fur_type": "coarse" + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "hash_values": [318], + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +--- + +## Read Audit Log by hash_value + +AuditLog must be enabled in the Harper configuration file to make this request. Returns the transactions logged for the specified database table which were committed to the specified hash value(s). [Read more about Harper transaction logs here](../../administration/logging/transaction-logging#read_transaction_log). 
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `read_audit_log` +- schema _(required)_ - schema under which the transaction log resides +- table _(required)_ - table under which the transaction log resides +- search_type _(optional)_ - hash_value +- search_values _(optional)_ - an array of hash_attributes for which you wish to see transaction logs + +### Body + +```json +{ + "operation": "read_audit_log", + "schema": "dev", + "table": "dog", + "search_type": "hash_value", + "search_values": [318] +} +``` + +### Response: 200 + +```json +{ + "318": [ + { + "operation": "insert", + "user_name": "admin", + "timestamp": 1660585635882.288, + "records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + }, + { + "operation": "update", + "user_name": "admin", + "timestamp": 1660586298457.224, + "records": [ + { + "id": 318, + "fur_type": "super fluffy", + "__updatedtime__": 1660586298455 + } + ], + "original_records": [ + { + "id": 318, + "dog_name": "Polliwog", + "__updatedtime__": 1660585635876, + "__createdtime__": 1660585635876 + } + ] + } + ] +} +``` + +--- + +## Delete Audit Logs Before + +AuditLog must be enabled in the Harper configuration file to make this request. Deletes audit log data for the specified database table that is older than the specified timestamp. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `delete_audit_logs_before` +- schema _(required)_ - schema under which the transaction log resides. Must be a string +- table _(required)_ - table under which the transaction log resides. Must be a string +- timestamp _(required)_ - records older than this date will be deleted. Format is millisecond-based epoch in UTC + +### Body + +```json +{ + "operation": "delete_audit_logs_before", + "schema": "dev", + "table": "dog", + "timestamp": 1660585759710.56 +} +``` + +### Response: 200 + +```json +{ + "message": "Starting job with id 7479e5f8-a86e-4fc9-add7-749493bc100f" +} +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/nosql-operations.md b/site/versioned_docs/version-4.6/developers/operations-api/nosql-operations.md new file mode 100644 index 00000000..099ebbcd --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/nosql-operations.md @@ -0,0 +1,384 @@ +--- +title: NoSQL Operations +--- + +# NoSQL Operations + +## Insert + +Adds one or more rows of data to a database table. Primary keys of the inserted JSON record may be supplied on insert. If a primary key is not provided, then a GUID or incremented number (depending on type) will be generated for each record. + +- operation _(required)_ - must always be `insert` +- database _(optional)_ - database where the table you are inserting records into lives. 
The default is `data`
+- table _(required)_ - table where you want to insert records
+- records _(required)_ - array of one or more records for insert
+
+### Body
+
+```json
+{
+  "operation": "insert",
+  "database": "dev",
+  "table": "dog",
+  "records": [
+    {
+      "id": 8,
+      "dog_name": "Harper",
+      "breed_id": 346,
+      "age": 7
+    },
+    {
+      "id": 9,
+      "dog_name": "Penny",
+      "breed_id": 154,
+      "age": 7
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "inserted 2 of 2 records",
+  "inserted_hashes": [8, 9],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Update
+
+Changes the values of specified attributes in one or more rows in a database table, as identified by the primary key. NOTE: The primary key of the updated JSON record(s) MUST be supplied on update.
+
+- operation _(required)_ - must always be `update`
+- database _(optional)_ - database of the table you are updating records in. The default is `data`
+- table _(required)_ - table where you want to update records
+- records _(required)_ - array of one or more records for update
+
+### Body
+
+```json
+{
+  "operation": "update",
+  "database": "dev",
+  "table": "dog",
+  "records": [
+    {
+      "id": 1,
+      "weight_lbs": 55
+    },
+    {
+      "id": 2,
+      "owner": "Kyle B",
+      "weight_lbs": 35
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "updated 2 of 2 records",
+  "update_hashes": [1, 2],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Upsert
+
+Changes the values of specified attributes for rows with matching primary keys that exist in the table. Adds rows to the database table for primary keys that do not exist or are not provided.
+
+- operation _(required)_ - must always be `upsert`
+- database _(optional)_ - database of the table you are upserting records in. The default is `data`
+- table _(required)_ - table where you want to upsert records
+- records _(required)_ - array of one or more records for upsert
+
+### Body
+
+```json
+{
+  "operation": "upsert",
+  "database": "dev",
+  "table": "dog",
+  "records": [
+    {
+      "id": 8,
+      "weight_lbs": 155
+    },
+    {
+      "name": "Bill",
+      "breed": "Pit Bull",
+      "id": 10,
+      "Age": 11,
+      "weight_lbs": 155
+    },
+    {
+      "name": "Harper",
+      "breed": "Mutt",
+      "age": 5,
+      "weight_lbs": 155
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "upserted 3 of 3 records",
+  "upserted_hashes": [8, 10, "ea06fc8e-717b-4c6c-b69d-b29014054ab7"]
+}
+```
+
+---
+
+## Delete
+
+Removes one or more rows of data from a specified table.
+
+- operation _(required)_ - must always be `delete`
+- database _(optional)_ - database where the table you are deleting records from lives. The default is `data`
+- table _(required)_ - table where you want to delete records
+- ids _(required)_ - array of one or more primary key values, which identifies the records to delete
+
+### Body
+
+```json
+{
+  "operation": "delete",
+  "database": "dev",
+  "table": "dog",
+  "ids": [1, 2]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "2 of 2 records successfully deleted",
+  "deleted_hashes": [1, 2],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Search By ID
+
+Returns data from a table for one or more primary keys.
+
+- operation _(required)_ - must always be `search_by_id`
+- database _(optional)_ - database where the table you are searching lives. The default is `data`
+- table _(required)_ - table you wish to search
+- ids _(required)_ - array of primary keys to retrieve
+- get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+
+### Body
+
+```json
+{
+  "operation": "search_by_id",
+  "database": "dev",
+  "table": "dog",
+  "ids": [1, 2],
+  "get_attributes": ["dog_name", "breed_id"]
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "dog_name": "Penny",
+    "breed_id": 154
+  },
+  {
+    "dog_name": "Harper",
+    "breed_id": 346
+  }
+]
+```
+
+---
+
+## Search By Value
+
+Returns data from a table for a matching value.
+
+- operation _(required)_ - must always be `search_by_value`
+- database _(optional)_ - database where the table you are searching lives. The default is `data`
+- table _(required)_ - table you wish to search
+- search_attribute _(required)_ - attribute you wish to search; can be any attribute
+- search_value _(required)_ - value you wish to search for; wildcards are allowed
+- get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+
+### Body
+
+```json
+{
+  "operation": "search_by_value",
+  "database": "dev",
+  "table": "dog",
+  "search_attribute": "owner_name",
+  "search_value": "Ky*",
+  "get_attributes": ["id", "dog_name"]
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "dog_name": "Penny"
+  },
+  {
+    "dog_name": "Kato"
+  }
+]
+```
+
+---
+
+## Search By Conditions
+
+Returns data from a table for one or more matching conditions. This supports grouping of conditions to indicate order of operations as well.
+
+- operation _(required)_ - must always be `search_by_conditions`
+- database _(optional)_ - database where the table you are searching lives. The default is `data`
+- table _(required)_ - table you wish to search
+- operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+- offset _(optional)_ - the number of records that the query results will skip. The default is `0`
+- limit _(optional)_ - the number of records that the query results will include. The default is `null`, resulting in no limit
+- sort _(optional)_ - an object that indicates the sort order. It has the following properties:
+  - attribute _(required)_ - the attribute to sort by
+  - descending _(optional)_ - if true, will sort in descending order (defaults to ascending order)
+  - next _(optional)_ - defines the next sort object that will be used to break ties when multiple records have the same value for the first attribute (follows the same structure as `sort`, and can recursively nest additional attributes; see the sketch following this list)
+- get_attributes _(required)_ - define which attributes you want returned. Use `['*']` to return all attributes
+- conditions _(required)_ - the array of condition objects, specified below, to filter by. Must include one or more objects in the array, each of which is either a condition or a grouped set of conditions. A condition has the following properties:
+  - search_attribute _(required)_ - the attribute you wish to search; can be any attribute
+  - search_type _(required)_ - the type of search to perform - `equals`, `contains`, `starts_with`, `ends_with`, `greater_than`, `greater_than_equal`, `less_than`, `less_than_equal`, `between`
+  - search_value _(required)_ - case-sensitive value you wish to search for. If the `search_type` is `between`, use an array of two values to search between
+
+  A grouped set of conditions has the following properties:
+
+  - operator _(optional)_ - the operator used between each condition - `and`, `or`. The default is `and`
+  - conditions _(required)_ - the array of condition objects, as described above
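+
+For instance, to sort primarily by `age` in descending order and break ties by `weight_lbs` in ascending order, the `sort` object nests as follows (a sketch using attributes from the examples on this page):
+
+```json
+{
+  "attribute": "age",
+  "descending": true,
+  "next": {
+    "attribute": "weight_lbs"
+  }
+}
+```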
+
+### Body
+
+```json
+{
+  "operation": "search_by_conditions",
+  "database": "dev",
+  "table": "dog",
+  "operator": "and",
+  "offset": 0,
+  "limit": 10,
+  "sort": {
+    "attribute": "id",
+    "next": {
+      "attribute": "age",
+      "descending": true
+    }
+  },
+  "get_attributes": ["*"],
+  "conditions": [
+    {
+      "search_attribute": "age",
+      "search_type": "between",
+      "search_value": [5, 8]
+    },
+    {
+      "search_attribute": "weight_lbs",
+      "search_type": "greater_than",
+      "search_value": 40
+    },
+    {
+      "operator": "or",
+      "conditions": [
+        {
+          "search_attribute": "adorable",
+          "search_type": "equals",
+          "search_value": true
+        },
+        {
+          "search_attribute": "lovable",
+          "search_type": "equals",
+          "search_value": true
+        }
+      ]
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "__createdtime__": 1620227719791,
+    "__updatedtime__": 1620227719791,
+    "adorable": true,
+    "age": 7,
+    "breed_id": 346,
+    "dog_name": "Harper",
+    "id": 2,
+    "owner_name": "Stephen",
+    "weight_lbs": 55
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 7,
+    "breed_id": 348,
+    "dog_name": "Alby",
+    "id": 3,
+    "owner_name": "Kaylan",
+    "weight_lbs": 84
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 6,
+    "breed_id": 347,
+    "dog_name": "Billy",
+    "id": 4,
+    "owner_name": "Zach",
+    "weight_lbs": 60
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 5,
+    "breed_id": 250,
+    "dog_name": "Gemma",
+    "id": 8,
+    "owner_name": "Stephen",
+    "weight_lbs": 55
+  },
+  {
+    "__createdtime__": 1620227719792,
+    "__updatedtime__": 1620227719792,
+    "adorable": true,
+    "age": 8,
+    "breed_id": 104,
+    "dog_name": "Bode",
+    "id": 11,
+    "owner_name": "Margo",
+    "weight_lbs": 75
+  }
+]
+```
diff --git a/site/versioned_docs/version-4.6/developers/operations-api/quickstart-examples.md b/site/versioned_docs/version-4.6/developers/operations-api/quickstart-examples.md
new file mode 100644
index 00000000..9d60c002
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/operations-api/quickstart-examples.md
@@ -0,0 +1,370 @@
+---
+title: Quick Start Examples
+---
+
+# Quick Start Examples
+
+Harper recommends utilizing [Harper Applications](../../developers/applications/) for defining databases, tables, and other functionality. However, this guide is a great way to get started with the Harper Operations API.
+
+## Create dog Table
+
+We first need to create a table. Since our company is named after our CEO's dog, let's create a table to store all our employees' dogs. We'll call this table `dog`.
+
+Tables in Harper are schema-less, so we don't need to add any attributes other than a `primary_key` (in pre-4.2 versions this was referred to as the `hash_attribute`) to create this table.
+
+Harper does offer a `database` parameter that can be used to hold logical groupings of tables. The parameter is optional, and if not provided, the operation will default to using a database named `data`.
+
+If you receive an error response, make sure your Basic Authentication user and password match those you entered during the installation process.
+
+### Body
+
+```json
+{
+  "operation": "create_table",
+  "table": "dog",
+  "primary_key": "id"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "table 'data.dog' successfully created."
+}
+```
+
+---
+
+## Create breed Table
+
+Now that we have a table to store our dog data, we also want to create a table to track known breeds.
Just as with the dog table, the only attribute we need to specify is the `primary_key`. + +### Body + +```json +{ + "operation": "create_table", + "table": "breed", + "primary_key": "id" +} +``` + +### Response: 200 + +```json +{ + "message": "table 'data.breed' successfully created." +} +``` + +--- + +## Insert 1 Dog + +We're ready to add some dog data. Penny is our CTO's pup, so she gets ID 1 or we're all fired. We are specifying attributes in this call, but this doesn't prevent us from specifying additional attributes in subsequent calls. + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 1, + "dog_name": "Penny", + "owner_name": "Kyle", + "breed_id": 154, + "age": 7, + "weight_lbs": 38 + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [1], + "skipped_hashes": [] +} +``` + +--- + +## Insert Multiple Dogs + +Let's add some more Harper doggies! We can add as many dog objects as we want into the records collection. If you're adding a lot of objects, we would recommend using the .csv upload option (see the next section where we populate the breed table). + +### Body + +```json +{ + "operation": "insert", + "table": "dog", + "records": [ + { + "id": 2, + "dog_name": "Harper", + "owner_name": "Stephen", + "breed_id": 346, + "age": 7, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 3, + "dog_name": "Alby", + "owner_name": "Kaylan", + "breed_id": 348, + "age": 7, + "weight_lbs": 84, + "adorable": true + }, + { + "id": 4, + "dog_name": "Billy", + "owner_name": "Zach", + "breed_id": 347, + "age": 6, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 5, + "dog_name": "Rose Merry", + "owner_name": "Zach", + "breed_id": 348, + "age": 8, + "weight_lbs": 15, + "adorable": true + }, + { + "id": 6, + "dog_name": "Kato", + "owner_name": "Kyle", + "breed_id": 351, + "age": 6, + "weight_lbs": 32, + "adorable": true + }, + { + "id": 7, + "dog_name": "Simon", + "owner_name": "Fred", + "breed_id": 349, + "age": 3, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 8, + "dog_name": "Gemma", + "owner_name": "Stephen", + "breed_id": 350, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 9, + "dog_name": "Yeti", + "owner_name": "Jaxon", + "breed_id": 200, + "age": 5, + "weight_lbs": 55, + "adorable": true + }, + { + "id": 10, + "dog_name": "Monkey", + "owner_name": "Aron", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + }, + { + "id": 11, + "dog_name": "Bode", + "owner_name": "Margo", + "breed_id": 104, + "age": 8, + "weight_lbs": 75, + "adorable": true + }, + { + "id": 12, + "dog_name": "Tucker", + "owner_name": "David", + "breed_id": 346, + "age": 2, + "weight_lbs": 60, + "adorable": true + }, + { + "id": 13, + "dog_name": "Jagger", + "owner_name": "Margo", + "breed_id": 271, + "age": 7, + "weight_lbs": 35, + "adorable": true + } + ] +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 12 of 12 records", + "inserted_hashes": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + "skipped_hashes": [] +} +``` + +--- + +## Bulk Insert Breeds Via CSV + +We need to populate the 'breed' table with some data so we can reference it later. For larger data sets, we recommend using our CSV upload option. + +Each header in a column will be considered as an attribute, and each row in the file will be a row in the table. Simply specify the file path and the table to upload to, and Harper will take care of the rest. 
You can pull the breeds.csv file from here: https://s3.amazonaws.com/complimentarydata/breeds.csv
+
+### Body
+
+```json
+{
+  "operation": "csv_url_load",
+  "table": "breed",
+  "csv_url": "https://s3.amazonaws.com/complimentarydata/breeds.csv"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "Starting job with id e77d63b9-70d5-499c-960f-6736718a4369",
+  "job_id": "e77d63b9-70d5-499c-960f-6736718a4369"
+}
+```
+
+---
+
+## Update 1 Dog Using NoSQL
+
+Harper supports NoSQL and SQL commands. We're going to update the dog table to show Penny's last initial using our NoSQL API.
+
+### Body
+
+```json
+{
+  "operation": "update",
+  "table": "dog",
+  "records": [
+    {
+      "id": 1,
+      "dog_name": "Penny B"
+    }
+  ]
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "updated 1 of 1 records",
+  "update_hashes": [1],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Select a Dog by ID Using SQL
+
+Now we're going to use a simple SQL SELECT call to pull Penny's updated data. Note that we now see Penny's last initial in the dog name.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT * FROM data.dog where id = 1"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "owner_name": "Kyle",
+    "adorable": null,
+    "breed_id": 154,
+    "__updatedtime__": 1610749428575,
+    "dog_name": "Penny B",
+    "weight_lbs": 38,
+    "id": 1,
+    "age": 7,
+    "__createdtime__": 1610749386566
+  }
+]
+```
+
+---
+
+## Select Dogs and Join Breed
+
+Here's a more complex SQL command joining the breed table with the dog table. We will also pull only the pups belonging to Kyle, Zach, and Stephen.
+
+### Body
+
+```json
+{
+  "operation": "sql",
+  "sql": "SELECT d.id, d.dog_name, d.owner_name, b.name, b.section FROM data.dog AS d INNER JOIN data.breed AS b ON d.breed_id = b.id WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen') AND b.section = 'Mutt' ORDER BY d.dog_name"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "id": 4,
+    "dog_name": "Billy",
+    "owner_name": "Zach",
+    "name": "LABRADOR / GREAT DANE MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 8,
+    "dog_name": "Gemma",
+    "owner_name": "Stephen",
+    "name": "SHORT HAIRED SETTER MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 2,
+    "dog_name": "Harper",
+    "owner_name": "Stephen",
+    "name": "HUSKY MIX",
+    "section": "Mutt"
+  },
+  {
+    "id": 5,
+    "dog_name": "Rose Merry",
+    "owner_name": "Zach",
+    "name": "TERRIER MIX",
+    "section": "Mutt"
+  }
+]
+```
diff --git a/site/versioned_docs/version-4.6/developers/operations-api/registration.md b/site/versioned_docs/version-4.6/developers/operations-api/registration.md
new file mode 100644
index 00000000..56775c5d
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/operations-api/registration.md
@@ -0,0 +1,70 @@
+---
+title: Registration
+---
+
+# Registration
+
+## Registration Info
+
+Returns the registration data of the Harper instance.
+
+- operation _(required)_ - must always be `registration_info`
+
+### Body
+
+```json
+{
+  "operation": "registration_info"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "registered": true,
+  "version": "4.2.0",
+  "ram_allocation": 2048,
+  "license_expiration_date": "2022-01-15"
+}
+```
+
+---
+
+## Get Fingerprint
+
+Returns the Harper fingerprint, uniquely generated based on the machine, for licensing purposes.
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `get_fingerprint`
+
+### Body
+
+```json
+{
+  "operation": "get_fingerprint"
+}
+```
+
+---
+
+## Set License
+
+Sets the Harper license as generated by Harper License Management software.
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `set_license` +- key _(required)_ - your license key +- company _(required)_ - the company that was used in the license + +### Body + +```json +{ + "operation": "set_license", + "key": "", + "company": "" +} +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/sql-operations.md b/site/versioned_docs/version-4.6/developers/operations-api/sql-operations.md new file mode 100644 index 00000000..71dfa436 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/sql-operations.md @@ -0,0 +1,127 @@ +--- +title: SQL Operations +--- + +:::warning +Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and uses cases where performance is not a priority. SQL optimizations are on our roadmap for the future. +::: + +# SQL Operations + +## Select + +Executes the provided SQL statement. The SELECT statement is used to query data from the database. + +- operation _(required)_ - must always be `sql` +- sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "SELECT * FROM dev.dog WHERE id = 1" +} +``` + +### Response: 200 + +```json +[ + { + "id": 1, + "age": 7, + "dog_name": "Penny", + "weight_lbs": 38, + "breed_id": 154, + "owner_name": "Kyle", + "adorable": true, + "__createdtime__": 1611614106043, + "__updatedtime__": 1611614119507 + } +] +``` + +--- + +## Insert + +Executes the provided SQL statement. The INSERT statement is used to add one or more rows to a database table. + +- operation _(required)_ - must always be `sql` +- sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "INSERT INTO dev.dog (id, dog_name) VALUE (22, 'Simon')" +} +``` + +### Response: 200 + +```json +{ + "message": "inserted 1 of 1 records", + "inserted_hashes": [22], + "skipped_hashes": [] +} +``` + +--- + +## Update + +Executes the provided SQL statement. The UPDATE statement is used to change the values of specified attributes in one or more rows in a database table. + +- operation _(required)_ - must always be `sql` +- sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "UPDATE dev.dog SET dog_name = 'penelope' WHERE id = 1" +} +``` + +### Response: 200 + +```json +{ + "message": "updated 1 of 1 records", + "update_hashes": [1], + "skipped_hashes": [] +} +``` + +--- + +## Delete + +Executes the provided SQL statement. The DELETE statement is used to remove one or more rows of data from a database table. + +- operation _(required)_ - must always be `sql` +- sql _(required)_ - use standard SQL + +### Body + +```json +{ + "operation": "sql", + "sql": "DELETE FROM dev.dog WHERE id = 1" +} +``` + +### Response: 200 + +```json +{ + "message": "1 of 1 record successfully deleted", + "deleted_hashes": [1], + "skipped_hashes": [] +} +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/system-operations.md b/site/versioned_docs/version-4.6/developers/operations-api/system-operations.md new file mode 100644 index 00000000..da47e104 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/system-operations.md @@ -0,0 +1,195 @@ +--- +title: System Operations +--- + +# System Operations + +## Restart + +Restarts the Harper instance. 
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `restart` + +### Body + +```json +{ + "operation": "restart" +} +``` + +### Response: 200 + +```json +{ + "message": "Restarting HarperDB. This may take up to 60 seconds." +} +``` + +--- + +## Restart Service + +Restarts servers for the specified Harper service. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `restart_service` +- service _(required)_ - must be one of: `http_workers`, `clustering_config` or `clustering` +- replicated _(optional)_ - must be a boolean. If set to `true`, Harper will replicate the restart service operation across all nodes in the cluster. The restart will occur as a rolling restart, ensuring that each node is fully restarted before the next node begins restarting. + +### Body + +```json +{ + "operation": "restart_service", + "service": "http_workers" +} +``` + +### Response: 200 + +```json +{ + "message": "Restarting http_workers" +} +``` + +--- + +## System Information + +Returns detailed metrics on the host system. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `system_information` +- attributes _(optional)_ - string array of top level attributes desired in the response, if no value is supplied all attributes will be returned. Available attributes are: ['system', 'time', 'cpu', 'memory', 'disk', 'network', 'harperdb_processes', 'table_size', 'metrics', 'threads', 'replication'] + +### Body + +```json +{ + "operation": "system_information" +} +``` + +--- + +## Set Status + +Sets a status value that can be used for application-specific status tracking. Status values are stored in memory and are not persisted across restarts. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `set_status` +- id _(required)_ - the key identifier for the status +- status _(required)_ - the status value to set (string between 1-512 characters) + +### Body + +```json +{ + "operation": "set_status", + "id": "primary", + "status": "active" +} +``` + +### Response: 200 + +```json +{ + "id": "primary", + "status": "active", + "__createdtime__": 1621364589543, + "__updatedtime__": 1621364589543 +} +``` + +### Notes + +- The `id` parameter must be one of the allowed status types: 'primary', 'maintenance', or 'availability' +- If no `id` is specified, it defaults to 'primary' +- For 'availability' status, only 'Available' or 'Unavailable' values are accepted +- For other status types, any string value is accepted + +--- + +## Get Status + +Retrieves a status value previously set with the set_status operation. 
+ +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `get_status` +- id _(optional)_ - the key identifier for the status to retrieve (defaults to all statuses if not provided) + +### Body + +```json +{ + "operation": "get_status", + "id": "primary" +} +``` + +### Response: 200 + +```json +{ + "id": "primary", + "status": "active", + "__createdtime__": 1621364589543, + "__updatedtime__": 1621364589543 +} +``` + +If no id parameter is provided, all status values will be returned: + +```json +[ + { + "id": "primary", + "status": "active", + "__createdtime__": 1621364589543, + "__updatedtime__": 1621364589543 + }, + { + "id": "maintenance", + "status": "scheduled", + "__createdtime__": 1621364600123, + "__updatedtime__": 1621364600123 + } +] +``` + +--- + +## Clear Status + +Removes a status entry by its ID. + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `clear_status` +- id _(required)_ - the key identifier for the status to remove + +### Body + +```json +{ + "operation": "clear_status", + "id": "primary" +} +``` + +### Response: 200 + +```json +{ + "message": "Status successfully cleared" +} +``` diff --git a/site/versioned_docs/version-4.6/developers/operations-api/token-authentication.md b/site/versioned_docs/version-4.6/developers/operations-api/token-authentication.md new file mode 100644 index 00000000..b9ff5b31 --- /dev/null +++ b/site/versioned_docs/version-4.6/developers/operations-api/token-authentication.md @@ -0,0 +1,60 @@ +--- +title: Token Authentication +--- + +# Token Authentication + +## Create Authentication Tokens + +Creates the tokens needed for authentication: operation & refresh token. + +_Note - this operation does not require authorization to be set_ + +- operation _(required)_ - must always be `create_authentication_tokens` +- username _(required)_ - username of user to generate tokens for +- password _(required)_ - password of user to generate tokens for + +### Body + +```json +{ + "operation": "create_authentication_tokens", + "username": "", + "password": "" +} +``` + +### Response: 200 + +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA1MTUwMzQ5LCJzdWIiOiJvcGVyYXRpb24ifQ.TlV93BqavQVQntXTt_WeY5IjAuCshfd6RzhihLWFWhu1qEKLHdwg9o5Z4ASaNmfuyKBqbFw65IbOYKd348EXeC_T6d0GO3yUhICYWXkqhQnxVW_T-ECKc7m5Bty9HTgfeaJ2e2yW55nbZYWG_gLtNgObUjCziX20-gGGR25sNTRm78mLQPYQkBJph6WXwAuyQrX704h0NfvNqyAZSwjxgtjuuEftTJ7FutLrQSLGIBIYq9nsHrFkheiDSn-C8_WKJ_zATa4YIofjqn9g5wA6o_7kSNaU2-gWnCm_jbcAcfvOmXh6rd89z8pwPqnC0f131qHIBps9UHaC1oozzmu_C6bsg7905OoAdFFY42Vojs98SMbfRApRvwaS4SprBsam3izODNI64ZUBREu3l4SZDalUf2kN8XPVWkI1LKq_mZsdtqr1r11Z9xslI1wVdxjunYeanjBhs7_j2HTX7ieVGn1a23cWceUk8F1HDGe_KEuPQs03R73V8acq_freh-kPhIa4eLqmcHeBw3WcyNGW8GuP8kyQRkGuO5sQSzZqbr_YSbZdSShZWTWDE6RYYC9ZV9KJtHVxhs0hexUpcoqO8OtJocyltRjtDjhSm9oUxszYRaALu-h8YadZT9dEKzsyQIt30d7LS9ETmmGWx4nKSTME2bV21PnDv_rEc5R6gnE", + "refresh_token": 
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6IkhEQl9BRE1JTiIsImlhdCI6MTYwNTA2Mzk0OSwiZXhwIjoxNjA3NjU1OTQ5LCJzdWIiOiJyZWZyZXNoIn0.znhJhkdSROBPP_GLRzAxYdjgQ3BuqpAbQB7zMSSOQJ3s83HnmZ10Bnpw_3L2aF-tOFgz_t6HUAvn26fNOLsspJD2aOvHPcVS4yLKS5nagpA6ar_pqng9f6Ebfs8ohguLCfHnHRJ8poLxuWRvWW9_9pIlDiwsj4yo3Mbxi3mW8Bbtnk2MwiNHFxTksD12Ne8EWz8q2jic5MjArqBBgR373oYoWU1oxpTM6gIsZCBRowXcc9XFy2vyRoggEUU4ISRFQ4ZY9ayJ-_jleSDCUamJSNQsdb1OUTvc6CxeYlLjCoV0ijRUB6p2XWNVezFhDu8yGqOeyGFJzArhxbVc_pl4UYd5aUVxhrO9DdhG29cY_mHV0FqfXphR9QllK--LJFTP4aFqkCxnVr7HSa17hL0ZVK1HaKrx21PAdCkVNZpD6J3RtRbTkfnIB_C3Be9jhOV3vpTf7ZGn_Bs3CPJi_sL313Z1yKSDAS5rXTPceEOcTPHjzkMP9Wz19KfFq_0kuiZdDmeYNqJeFPAgGJ-S0tO51krzyGqLyCCA32_W104GR8OoQi2gEED6HIx2G0-1rnLnefN6eHQiY5r-Q3Oj9e2y3EvqqgWOmEDw88-SjPTwQVnMbBHYN2RfluU7EmvDh6Saoe79Lhlu8ZeSJ1x6ZgA8-Cirraz1_526Tn8v5FGDfrc" +} +``` + +--- + +## Refresh Operation Token + +This operation creates a new operation token. + +- operation _(required)_ - must always be `refresh_operation_token` +- refresh*token *(required)\_ - the refresh token that was provided when tokens were created + +### Body + +```json +{ + "operation": "refresh_operation_token", + "refresh_token": "EXISTING_REFRESH_TOKEN" +} +``` + +### Response: 200 + +```json +{ + "operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ1MTc4Nzk1MjMsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMywiYWN0aXZlIjp0cnVlLCJhdXRoX3Rva2VuIjpudWxsLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDUxNzg3OTUyMSwiX191cGRhdGVkdGltZV9fIjoxNjA0NTE3ODc5NTIxLCJpZCI6IjZhYmRjNGJhLWU5MjQtNDlhNi1iOGY0LWM1NWUxYmQ0OTYzZCIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6IkhEQl9BRE1JTiJ9LCJpYXQiOjE2MDUwNjQ0MjMsImV4cCI6MTYwNTE1MDgyMywic3ViIjoib3BlcmF0aW9uIn0.VVZdhlh7_xFEaGPwhAh6VJ1d7eisiF3ok3ZwLTQAMWZB6umb2S7pPSTbXAmqAGHRlFAK3BYfnwT3YWt0gZbHvk24_0x3s_dej3PYJ8khIxzMjqpkR6qSjQIC2dhKqpwRPNtoqW_xnep9L-qf5iPtqkwsqWhF1c5VSN8nFouLWMZSuJ6Mag04soNhFvY0AF6QiTyzajMTb6uurRMWOnxk8hwMrY_5xtupabqtZheXP_0DV8l10B7GFi_oWf_lDLmwRmNbeUfW8ZyCIJMj36bjN3PsfVIxog87SWKKCwbWZWfJWw0KEph-HvU0ay35deyGWPIaDQmujuh2vtz-B0GoIAC58PJdXNyQRzES_nSb6Oqc_wGZsLM6EsNn_lrIp3mK_3a5jirZ8s6Z2SfcYKaLF2hCevdm05gRjFJ6ijxZrUSOR2S415wLxmqCCWCp_-s
EUz8erUrf07_aj-Bv99GUub4b_znOsQF3uABKd4KKff2cNSMhAa-6sro5GDRRJg376dcLi2_9HOZbnSo90zrpVq8RNV900aydyzDdlXkZja8jdHBk4mxSSewYBvM7up6I0G4X-ZlzFOp30T7kjdLa6480Qp34iYRMMtq0Htpb5k2jPt8dNFnzW-Q2eRy1wNBbH3cCH0rd7_BIGuTCrl4hGU8QjlBiF7Gj0_-uJYhKnhg"
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/operations-api/users-and-roles.md b/site/versioned_docs/version-4.6/developers/operations-api/users-and-roles.md
new file mode 100644
index 00000000..ecaa1117
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/operations-api/users-and-roles.md
@@ -0,0 +1,508 @@
+---
+title: Users and Roles
+---
+
+# Users and Roles
+
+## List Roles
+
+Returns a list of all roles. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `list_roles`
+
+### Body
+
+```json
+{
+  "operation": "list_roles"
+}
+```
+
+### Response: 200
+
+```json
+[
+  {
+    "__createdtime__": 1611615061106,
+    "__updatedtime__": 1611615061106,
+    "id": "05c2ffcd-f780-40b1-9432-cfe8ba5ad890",
+    "permission": {
+      "super_user": false,
+      "dev": {
+        "tables": {
+          "dog": {
+            "read": true,
+            "insert": true,
+            "update": true,
+            "delete": false,
+            "attribute_permissions": [
+              {
+                "attribute_name": "name",
+                "read": true,
+                "insert": true,
+                "update": true
+              }
+            ]
+          }
+        }
+      }
+    },
+    "role": "developer"
+  },
+  {
+    "__createdtime__": 1610749235614,
+    "__updatedtime__": 1610749235614,
+    "id": "136f03fa-a0e9-46c3-bd5d-7f3e7dd5b564",
+    "permission": {
+      "cluster_user": true
+    },
+    "role": "cluster_user"
+  },
+  {
+    "__createdtime__": 1610749235609,
+    "__updatedtime__": 1610749235609,
+    "id": "745b3138-a7cf-455a-8256-ac03722eef12",
+    "permission": {
+      "super_user": true
+    },
+    "role": "super_user"
+  }
+]
+```
+
+---
+
+## Add Role
+
+Creates a new role with the specified permissions. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `add_role`
+- role _(required)_ - name of the role you are defining
+- permission _(required)_ - object defining permissions for users associated with this role:
+  - super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, the value is assumed to be false.
+  - structure_user _(optional)_ - boolean OR array of database names (as strings). If a boolean, the user can create new databases and tables. If an array of strings, the user can only manage tables within the specified databases. This overrides any individual table permissions for the specified databases, or for all databases if the value is true. (A sketch of an array value follows this list.)
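+
+For example, under the semantics above, a role that is not a super user but may manage tables only within the `dev` and `test` databases might carry the following permission object (a sketch; the database names are placeholders):
+
+```json
+{
+  "super_user": false,
+  "structure_user": ["dev", "test"]
+}
+```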
+
+### Body
+
+```json
+{
+  "operation": "add_role",
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+```
+
+### Response: 200
+
+```json
+{
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": true,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  },
+  "id": "0a9368b0-bd81-482f-9f5a-8722e3582f96",
+  "__updatedtime__": 1598549532897,
+  "__createdtime__": 1598549532897
+}
+```
+
+---
+
+## Alter Role
+
+Updates the name and/or permissions of an existing role. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `alter_role`
+- id _(required)_ - the id value for the role you are altering
+- role _(optional)_ - name value to update on the role you are altering
+- permission _(required)_ - object defining permissions for users associated with this role:
+  - super_user _(optional)_ - boolean which, if set to true, gives users associated with this role full access to all operations and methods. If not included, the value is assumed to be false.
+  - structure_user _(optional)_ - boolean OR array of database names (as strings). If a boolean, the user can create new databases and tables. If an array of strings, the user can only manage tables within the specified databases. This overrides any individual table permissions for the specified databases, or for all databases if the value is true.
+
+### Body
+
+```json
+{
+  "operation": "alter_role",
+  "id": "f92162e2-cd17-450c-aae0-372a76859038",
+  "role": "another_developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": false,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+```
+
+### Response: 200
+
+```json
+{
+  "id": "a7cb91e9-32e4-4dbf-a327-fab4fa9191ea",
+  "role": "developer",
+  "permission": {
+    "super_user": false,
+    "structure_user": false,
+    "dev": {
+      "tables": {
+        "dog": {
+          "read": true,
+          "insert": true,
+          "update": true,
+          "delete": false,
+          "attribute_permissions": [
+            {
+              "attribute_name": "name",
+              "read": false,
+              "insert": true,
+              "update": true
+            }
+          ]
+        }
+      }
+    }
+  },
+  "__updatedtime__": 1598549996106
+}
+```
+
+---
+
+## Drop Role
+
+Deletes an existing role from the database. NOTE: A role with associated users cannot be dropped. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - this must always be `drop_role`
+- id _(required)_ - this is the id of the role you are dropping
+
+### Body
+
+```json
+{
+  "operation": "drop_role",
+  "id": "developer"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "developer successfully deleted"
+}
+```
+
+---
+
+## List Users
+
+Returns a list of all users.
[Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `list_users` + +### Body + +```json +{ + "operation": "list_users" +} +``` + +### Response: 200 + +```json +[ + { + "__createdtime__": 1635520961165, + "__updatedtime__": 1635520961165, + "active": true, + "role": { + "__createdtime__": 1635520961161, + "__updatedtime__": 1635520961161, + "id": "7c78ef13-c1f3-4063-8ea3-725127a78279", + "permission": { + "super_user": true, + "system": { + "tables": { + "hdb_table": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_attribute": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_schema": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_user": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_role": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_job": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_license": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_info": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_nodes": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + }, + "hdb_temp": { + "read": true, + "insert": false, + "update": false, + "delete": false, + "attribute_permissions": [] + } + } + } + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" + } +] +``` + +--- + +## User Info + +Returns user data for the associated user credentials. + +- operation _(required)_ - must always be `user_info` + +### Body + +```json +{ + "operation": "user_info" +} +``` + +### Response: 200 + +```json +{ + "__createdtime__": 1610749235611, + "__updatedtime__": 1610749235611, + "active": true, + "role": { + "__createdtime__": 1610749235609, + "__updatedtime__": 1610749235609, + "id": "745b3138-a7cf-455a-8256-ac03722eef12", + "permission": { + "super_user": true + }, + "role": "super_user" + }, + "username": "HDB_ADMIN" +} +``` + +--- + +## Add User + +Creates a new user with the specified role and credentials. [Learn more about Harper roles here.](../security/users-and-roles) + +_Operation is restricted to super_user roles only_ + +- operation _(required)_ - must always be `add_user` +- role _(required)_ - 'role' name value of the role you wish to assign to the user. See `add_role` for more detail +- username _(required)_ - username assigned to the user. It can not be altered after adding the user. It serves as the hash +- password _(required)_ - clear text for password. Harper will encrypt the password upon receipt +- active _(required)_ - boolean value for status of user's access to your Harper instance. If set to false, user will not be able to access your instance of Harper. + +### Body + +```json +{ + "operation": "add_user", + "role": "role_name", + "username": "hdb_user", + "password": "password", + "active": true +} +``` + +### Response: 200 + +```json +{ + "message": "hdb_user successfully added" +} +``` + +--- + +## Alter User + +Modifies an existing user's role and/or credentials. 
[Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `alter_user`
+- username _(required)_ - username assigned to the user. It cannot be altered after adding the user. It serves as the hash.
+- password _(optional)_ - clear text for password. Harper will encrypt the password upon receipt
+- role _(optional)_ - `role` name value of the role you wish to assign to the user. See `add_role` for more detail
+- active _(optional)_ - status of user's access to your Harper instance. See `add_user` for more detail
+
+### Body
+
+```json
+{
+  "operation": "alter_user",
+  "role": "role_name",
+  "username": "hdb_user",
+  "password": "password",
+  "active": true
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "updated 1 of 1 records",
+  "new_attributes": [],
+  "txn_time": 1611615114397.988,
+  "update_hashes": ["hdb_user"],
+  "skipped_hashes": []
+}
+```
+
+---
+
+## Drop User
+
+Deletes an existing user by username. [Learn more about Harper roles here.](../security/users-and-roles)
+
+_Operation is restricted to super_user roles only_
+
+- operation _(required)_ - must always be `drop_user`
+- username _(required)_ - username assigned to the user
+
+### Body
+
+```json
+{
+  "operation": "drop_user",
+  "username": "sgoldberg"
+}
+```
+
+### Response: 200
+
+```json
+{
+  "message": "sgoldberg successfully deleted"
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/real-time.md b/site/versioned_docs/version-4.6/developers/real-time.md
new file mode 100644
index 00000000..03853ae6
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/real-time.md
@@ -0,0 +1,180 @@
+---
+title: Real-Time
+---
+
+# Real-Time
+
+## Real-Time
+
+Harper provides real-time access to data and messaging. This allows clients to monitor and subscribe to data for changes in real-time, as well as handle data-oriented messaging. Harper supports multiple standardized protocols to facilitate diverse standards-based client interaction.
+
+Harper real-time communication is based around database tables. Declared tables are the basis for monitoring data, and for defining "topics" for publishing and subscribing to messages. Declaring a table that establishes a topic can be as simple as adding a table with no attributes to your [schema.graphql in a Harper application folder](./applications/):
+
+```graphql
+type MyTopic @table @export
+```
+
+You can then subscribe to records or sub-topics in this topic/namespace, as well as save data and publish messages, with the protocols discussed below.
+
+### Content Negotiation
+
+Harper is a database, not a generic broker, and therefore highly adept at handling _structured_ data. Data can be published and subscribed in all supported structured/object formats, including JSON, CBOR, and MessagePack, and the data will be stored and handled as structured data. This means that different clients can individually choose which format they prefer, both for inbound and outbound messages. One client could publish in JSON, and another client could choose to receive messages in CBOR.
+
+## Protocols
+
+### MQTT
+
+Harper supports MQTT as an interface to this real-time data delivery. It is important to note that MQTT in Harper is not just a generic pub/sub hub, but is deeply integrated with the database, providing subscriptions directly to database records, and publishing to these records.
In this document, we will explain how MQTT pub/sub concepts are aligned and integrated with database functionality.
+
+#### Configuration
+
+Harper supports MQTT through its `mqtt` server module, over standard TCP sockets or over WebSockets. This is enabled by default, but can be configured in your `harperdb-config.yaml` configuration, allowing you to change which ports it listens on, whether secure TLS connections are used, and whether MQTT is accepted over WebSockets:
+
+```yaml
+mqtt:
+  network:
+    port: 1883
+    securePort: 8883 # for TLS
+    webSocket: true # will also enable WS support through the default HTTP interface/port
+    mTLS: false
+  requireAuthentication: true
+```
+
+Note that if you are using WebSockets for MQTT, the sub-protocol should be set to "mqtt" (this is required by the MQTT specification, and should be included by any conformant client): `Sec-WebSocket-Protocol: mqtt`. mTLS is also supported by enabling it in the configuration and using the certificate authority from the TLS section of the configuration. See the [configuration documentation for more information](../deployments/configuration).
+
+#### Capabilities
+
+Harper's MQTT capabilities include support for MQTT versions v3.1 and v5, with standard publish and subscription capabilities, multi-level topics, QoS 0 and 1 levels, and durable (non-clean) sessions. Harper supports QoS 2 interaction, but doesn't guarantee exactly-once delivery (although any guarantee of exactly-once delivery over unstable networks is a fictional aspiration). Harper doesn't currently support last will, nor single-level wildcards (only multi-level wildcards).
+
+### Topics
+
+In MQTT, messages are published to, and subscribed from, topics. In Harper, topics are aligned with resource endpoint paths in exactly the same way as the REST endpoints. If you define a table or resource in your schema, with a path/endpoint of "my-resource", that means that this can be addressed as a topic just like a URL path. So a topic of "my-resource/some-id" would correspond to the record in the my-resource table (or custom resource) with a record id of "some-id".
+
+This means that you can subscribe to "my-resource/some-id", and this subscription will receive notification messages for any updates to this record. If this record is modified or deleted, a message will be sent to listeners of this subscription.
+
+The current value of this record is also treated as the "retained" message for this topic. When you subscribe to "my-resource/some-id", you will immediately receive the record for this id, through a "publish" command from the server, as the initial "retained" message that is first delivered. This provides a simple and effective way to get the current state of a record and future updates to that record, without having to worry about timing issues of aligning a retrieval and subscription separately.
+
+Similarly, publishing a message to a "topic" also interacts with the database. Publishing a message with the "retain" flag enabled is interpreted as an update or put to that record. The published message will replace the current record with the contents of the published message.
+
+If a message is published without a `retain` flag, the message will not alter the record at all, but will still be published to any subscribers to that record.
+
+Harper supports QoS 0 and 1 for publishing and subscribing.
+
+Harper supports multi-level topics, both for subscribing and publishing.
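+
+As a concrete illustration, here is a minimal sketch of subscribing and publishing with the mqtt.js client library (the use of mqtt.js, the server hostname, credentials, and the `my-resource` table name are all assumptions for illustration; any conformant MQTT client will work):
+
+```javascript
+// Sketch: subscribe to a record topic and publish a retained update.
+const mqtt = require('mqtt');
+
+const client = mqtt.connect('mqtt://server:1883', {
+  username: 'user',
+  password: 'password',
+});
+
+client.on('connect', () => {
+  // The current record is delivered immediately as the retained message.
+  client.subscribe('my-resource/some-id', { qos: 1 });
+
+  // A retained publish acts as a put, replacing the record's contents.
+  client.publish('my-resource/some-id', JSON.stringify({ name: 'updated' }), { retain: true, qos: 1 });
+});
+
+client.on('message', (topic, payload) => {
+  console.log(topic, JSON.parse(payload.toString()));
+});
+```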
Harper also supports multi-level wildcards, so you can subscribe to `my-resource/#` to receive notifications for `my-resource/some-id` as well as `my-resource/nested/id`, or you can subscribe to `my-resource/nested/#` and receive the latter, but not the former, topic messages. Harper currently only supports trailing multi-level wildcards (no single-level wildcards with '\*').
+
+#### Events
+
+JavaScript components can also listen for MQTT events. This is available on the `server.mqtt.events` object. For example, to set up a listener/callback for when MQTT clients connect and authorize, we can do:
+
+```javascript
+server.mqtt.events.on('connected', (session, socket) => {
+  console.log('client connected with id', session.clientId);
+});
+```
+
+The following MQTT events are available:
+
+- `connection` - When a client initially establishes a TCP or WS connection to the server
+- `connected` - When a client establishes an authorized MQTT connection
+- `auth-failed` - When a client fails to authenticate
+- `disconnected` - When a client disconnects from the server
+
+### Ordering
+
+Harper is designed to be a distributed database, and an intrinsic characteristic of distributed servers is that messages may take different amounts of time to traverse the network, and may arrive in a different order depending on server location and network topology. Harper is designed for distributed data with minimal latency, so messages are delivered to subscribers immediately when they arrive. Harper does not delay messages to coordinate confirmation or consensus among other nodes, which would significantly increase latency; messages are delivered as quickly as possible.
+
+As an example, let's consider message #1, which is published to node A, which then sends the message to node B and node C, but the message takes a while to get there. Slightly later, while the first message is still in transit, message #2 is published to node B, which then replicates it to A and C, and because of network conditions, message #2 arrives at node C before message #1. Because Harper prioritizes low latency, when node C receives message #2, it immediately publishes it to all its local subscribers (it has no knowledge that message #1 is in transit).
+
+When message #1 is received by node C, what it does with this message depends on whether the message is a "retained" message (published with a retain flag set to true, or put/updated/upserted/inserted into the database) or a non-retained message. In the case of a non-retained message, the message will be delivered to all local subscribers (even though it had been published earlier), thereby prioritizing the delivery of every message. On the other hand, a retained message will not deliver the earlier out-of-order message to clients; Harper will keep the message with the latest timestamp as the "winning" record state (which will be the retained message for any subsequent subscriptions). Retained messages maintain (eventual) consistency across the entire cluster of servers: all nodes will converge on the same message as being the latest, retained message (#2 in this case).
+
+Non-retained messages are generally a good choice for applications like chat, where every message needs to be delivered even if messages might arrive out of order (the order may not be consistent across all servers).
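+
+To make the distinction concrete, here is a sketch of both kinds of publish, continuing the mqtt.js example above (the topic names are hypothetical):
+
+```javascript
+// (client as created in the earlier sketch)
+// Non-retained: delivered to current subscribers, but does not alter the record.
+client.publish('chat-room/42', JSON.stringify({ text: 'hello' }), { retain: false, qos: 1 });
+
+// Retained: acts as a put, replacing the record and becoming the initial
+// message delivered to any later subscriber.
+client.publish('sensor/7', JSON.stringify({ temperature: 21.5 }), { retain: true, qos: 1 });
+```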
Retained messages can be thought of as "superseding" messages, and are a good fit for applications like instrument measurements (temperature readings, for example), where the priority is to provide the _latest_ temperature, older readings are not worth publishing after a newer one, and consistency of the most recent record (across the network) is important.
+
+### WebSockets
+
+WebSockets are supported through the REST interface and go through the `connect(incomingMessages)` method on resources. By default, making a WebSockets connection to a URL will subscribe to the referenced resource. For example, making a WebSocket connection to `new WebSocket('wss://server/my-resource/341')` will access the resource defined for 'my-resource' with the resource id of 341 and connect to it. On the web platform this could be:
+
+```javascript
+let ws = new WebSocket('wss://server/my-resource/341');
+ws.onmessage = (event) => {
+  // received a notification from the server
+  let data = JSON.parse(event.data);
+};
+```
+
+By default, the resource will make a subscription to that resource, monitoring any changes to the records or messages published to it, and will return events on the WebSockets connection. You can also override `connect(incomingMessages)` with your own handler. The `connect` method simply needs to return an iterable (asynchronous iterable) that represents the stream of messages to be sent to the client. One easy way to create an iterable stream is to define the `connect` method as a generator and `yield` messages as they become available. For example, a simple WebSockets echo server for a resource could be written:
+
+```javascript
+export class Echo extends Resource {
+  async *connect(incomingMessages) {
+    // wait for each incoming message from the client
+    for await (let message of incomingMessages) {
+      // and send the message back to the client
+      yield message;
+    }
+  }
+}
+```
+
+You can also call the default `connect`, and it will provide a convenient streaming iterable with events for the outgoing messages, with a `send` method that you can call to send messages on the iterable, and a `close` event for determining when the connection is closed. The incoming messages iterable is also an event emitter, and you can listen for `data` events to get the incoming messages using event style:
+
+```javascript
+export class Example extends Resource {
+  connect(incomingMessages) {
+    let outgoingMessages = super.connect();
+    let timer = setInterval(() => {
+      outgoingMessages.send({ greeting: 'hi again!' });
+    }, 1000); // send a message once a second
+    incomingMessages.on('data', (message) => {
+      // another way of echoing the data back to the client
+      outgoingMessages.send(message);
+    });
+    outgoingMessages.on('close', () => {
+      // make sure we end the timer once the connection is closed
+      clearInterval(timer);
+    });
+    return outgoingMessages;
+  }
+}
+```
+
+### Server Sent Events
+
+Server Sent Events (SSE) are also supported through the REST server interface, and provide a simple and efficient mechanism for web-based applications to receive real-time updates. For consistency of push delivery, SSE connections go through the `connect()` method on resources, much like WebSockets. The primary difference is that `connect` is called without any `incomingMessages` argument, since SSE is a one-directional transport mechanism. This can be used much like WebSockets: specifying a resource URL path will connect to that resource, and by default provides a stream of messages for changes and messages for that resource.
For example, you can connect to receive notifications in a browser for a resource like:
+
+```javascript
+let eventSource = new EventSource('https://server/my-resource/341', { withCredentials: true });
+eventSource.onmessage = (event) => {
+  // received a notification from the server
+  let data = JSON.parse(event.data);
+};
+```
+
+### MQTT Feature Support Matrix
+
+| Feature | Support |
+| --- | --- |
+| Connections, protocol negotiation, and acknowledgement with v3.1.1 | :heavy_check_mark: |
+| Connections, protocol negotiation, and acknowledgement with v5 | :heavy_check_mark: |
+| Secure MQTTS | :heavy_check_mark: |
+| MQTTS over WebSockets | :heavy_check_mark: |
+| MQTT authentication via user/pass | :heavy_check_mark: |
+| MQTT authentication via mTLS | :heavy_check_mark: |
+| Publish | :heavy_check_mark: |
+| Subscribe | :heavy_check_mark: |
+| Multi-level wildcard | :heavy_check_mark: |
+| Single-level wildcard | :heavy_check_mark: |
+| QoS 0 | :heavy_check_mark: |
+| QoS 1 | :heavy_check_mark: |
+| QoS 2 | Not fully supported; can perform the QoS 2 conversation but doesn't guarantee exactly-once delivery |
+| Keep-Alive monitoring | :heavy_check_mark: |
+| Clean session | :heavy_check_mark: |
+| Durable session | :heavy_check_mark: |
+| Distributed durable session | |
+| Will | :heavy_check_mark: |
+| MQTT V5 User properties | |
+| MQTT V5 Will properties | |
+| MQTT V5 Connection properties | |
+| MQTT V5 Connection acknowledgement properties | |
+| MQTT V5 Publish properties | |
+| MQTT V5 Subscribe properties retain handling | :heavy_check_mark: |
+| MQTT V5 Subscribe properties | |
+| MQTT V5 Ack properties | |
+| MQTT V5 AUTH command | |
+| MQTT V5 Shared Subscriptions | |
diff --git a/site/versioned_docs/version-4.6/developers/replication/index.md b/site/versioned_docs/version-4.6/developers/replication/index.md
new file mode 100644
index 00000000..3b6164fd
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/replication/index.md
@@ -0,0 +1,280 @@
+---
+title: Replication/Clustering
+---
+
+# Replication/Clustering
+
+Harper’s replication system is designed to make distributed data replication fast and reliable across multiple nodes. This means you can easily build a distributed database that ensures high availability, disaster recovery, and data localization. The best part? It’s simple to set up, configure, and manage. You can easily add or remove nodes, choose which data to replicate, and monitor the system’s health without jumping through hoops.
+
+### Replication Overview
+
+Harper replication uses a peer-to-peer model where every node in your cluster can send and subscribe to data. Each node connects through WebSockets, allowing data to flow seamlessly in both directions. By default, Harper takes care of managing these connections and subscriptions, so you don’t have to worry about data consistency. The system is designed to maintain secure, reliable connections between nodes, ensuring that your data is always safe.
+
+### Replication Configuration
+
+To connect your nodes, you need to provide hostnames or URLs for the nodes to connect to each other. This can be done via configuration or through operations. To configure replication, you can specify connection information in the `replication` section of the [harperdb-config.yaml](../../deployments/configuration).
Here, you can specify the hostname of the current node, and routes to connect to other nodes, for example:
+
+```yaml
+replication:
+  hostname: server-one
+  routes:
+    - server-two
+    - server-three
+```
+
+In this example, the current node is `server-one`, and it will connect to `server-two` and `server-three`. Routes to other nodes can also be configured with URLs or ports:
+
+```yaml
+replication:
+  hostname: server-one
+  routes:
+    - wss://server-two:9933 # URL based route
+    - hostname: server-three # define a hostname and port
+      port: 9933
+```
+
+You can also use the [operations API](../operations-api/clustering) to dynamically add and remove nodes from the cluster. This is useful for adding new nodes to a running cluster or removing nodes that are no longer needed. For example (note this is the basic form; you would also need to provide the necessary credentials for the operation, see the section on securing connections for more details):
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two"
+}
+```
+
+These operations will also dynamically generate certificates as needed, if there are no existing signed certificates, or if the existing certificates are not valid for the new node.
+
+Harper will also automatically replicate node information to other nodes in a cluster ([gossip-style discovery](https://highscalability.com/gossip-protocol-explained/)). This means that you only need to connect to one node in an existing cluster, and Harper will automatically detect and connect to the other nodes in the cluster (bidirectionally).
+
+By default, Harper will replicate all the data in all the databases. You can configure which databases are replicated, and then override this behavior on a per-table basis. For example, you can indicate which databases should be replicated by default, here indicating you want to replicate the `data` and `system` databases:
+
+```yaml
+replication:
+  databases:
+    - data
+    - system
+```
+
+By default, all tables within a replicated database will be replicated. Transactions are replicated atomically, which may involve data across multiple tables. However, you can also configure replication for individual tables, and exclude specific tables in a database from replication by setting `replicate` to `false` in the table definition:
+
+```graphql
+type LocalTableForNode @table(replicate: false) {
+  id: ID!
+  name: String!
+}
+```
+
+You can also control which nodes data is replicated to, and how many nodes data is replicated to. By default, Harper will replicate data to all nodes in the cluster, but you can control where data is replicated with the [sharding configuration and APIs](./sharding).
+
+By default, replication connects to the secure port 9933. You can configure the replication port in the `replication` section:
+
+```yaml
+replication:
+  securePort: 9933
+```
+
+### Securing Connections
+
+Harper supports the highest levels of security through public key infrastructure (PKI) based security and authorization. Depending on your security configuration, you can configure Harper in several different ways to build a connected cluster.
+
+#### Provide your own certificates
+
+If you want to secure your Harper connections with your own signed certificates, you can easily do so. Whether you have certificates from a public authority (like Let's Encrypt or DigiCert) or a corporate certificate authority, you can use them to authenticate nodes securely.
You can then allow nodes to authorize each other by checking the certificate against the standard list of root certificate authorities, by enabling the `enableRootCAs` option in the config:
+
+```yaml
+replication:
+  enableRootCAs: true
+```
+
+And then just make sure the certificate’s common name (CN) matches the node's hostname.
+
+#### Setting Up Custom Certificates
+
+There are two ways to configure Harper with your own certificates:
+
+1. Use the `add_certificate` operation to upload them.
+2. Or, specify the certificate paths directly in the `replication` section of the `harperdb-config.yaml` file.
+
+If your certificate is signed by a trusted public authority, just provide the path to the certificate and private key. If you're using self-signed certificates or a private certificate authority, you’ll also need to provide the certificate authority (CA) details to complete the setup.\
+\
+Example configuration:
+
+```yaml
+tls:
+  certificate: /path/to/certificate.pem
+  certificateAuthority: /path/to/ca.pem
+  privateKey: /path/to/privateKey.pem
+```
+
+With this in place, Harper will load the provided certificates into the certificate table and use them to secure and authenticate connections between nodes.
+
+You have the option to skip providing a specific certificate authority (CA) and instead verify your certificate against the root certificates included in the bundled Mozilla CA store. This bundled CA store, provided by Node.js, is a snapshot of Mozilla's CA certificates that is fixed at the time of each Node.js release.
+
+To enable the root certificates, set `replication.enableRootCAs` to `true` in the `harperdb-config.yaml` file:
+
+```yaml
+replication:
+  enableRootCAs: true
+```
+
+#### Cross-generated certificates
+
+Harper can also generate its own certificates for secure connections. This is useful for setting up secure connections between nodes when no existing certificates are available, and can be used in development, testing, or production environments. Certificates will be automatically requested and signed between nodes to support a form of distributed certificate generation and signing. To establish secure connections between nodes using cross-generated certificates, you simply use the [`add_node` operation](../operations-api/clustering) over SSL, and specify the temporary authentication credentials to use for connecting and authorizing the certificate generation and signing.\
+\
+Example configuration:
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "verify_tls": false,
+  "authorization": {
+    "username": "admin",
+    "password": "password"
+  }
+}
+```
+
+When you connect to another node (e.g., `server-two`), Harper uses secure WebSockets and the provided credentials to establish the connection.
+
+If you’re working with a fresh install, you’ll need to set `verify_tls` to `false` temporarily, so the self-signed certificate is accepted. Once the connection is made, Harper will automatically handle the certificate signing process:
+
+- It creates a certificate signing request (CSR) and sends it to `server-two`, which then signs it and returns the signed certificate along with the certificate authority (CA).
+- The signed certificate is stored for future connections between the nodes, ensuring secure communication.
+
+**Important:** Your credentials are not stored; they are discarded immediately after use.
+
+You can also provide credentials in HTTP Authorization format (Basic auth, Token auth, or JWT). This is helpful for handling authentication with the required permissions to generate and sign certificates.
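+
+For instance, a sketch of `add_node` with a pre-built Authorization header value (the string form of `authorization` and the encoded credentials here are assumptions for illustration):
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "verify_tls": false,
+  "authorization": "Basic YWRtaW46cGFzc3dvcmQ="
+}
+```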
Additionally, you can use `set_node` as an alias for the `add_node` operation if you prefer.
+
+#### Revoking Certificates
+
+Certificates used in replication can be revoked by using the certificate serial number and either the `revoked_certificates` attribute in the `hdb_nodes` system table or the route config in `harperdb-config.yaml`.
+
+To utilize the `revoked_certificates` attribute in the `hdb_nodes` table, you can use the `add_node` or `update_node` operation to add the certificate serial number to the `revoked_certificates` array. For example:
+
+```json
+{
+  "operation": "update_node",
+  "hostname": "server-two",
+  "revoked_certificates": ["1769F7D6A"]
+}
+```
+
+To utilize the replication route config in `harperdb-config.yaml`, you can add the certificate serial number to the `revokedCertificates` array. For example:
+
+```yaml
+replication:
+  routes:
+    - hostname: server-three
+      port: 9930
+      revokedCertificates:
+        - 1769F7D6A
+        - QA69C7E2S
+```
+
+#### Removing Nodes
+
+Nodes can be removed from the cluster using the [`remove_node` operation](../operations-api/clustering). This will remove the node from the cluster, and stop replication to and from the node. For example:
+
+```json
+{
+  "operation": "remove_node",
+  "hostname": "server-two"
+}
+```
+
+#### Insecure Connection IP-based Authentication
+
+You can completely disable secure connections and use IP addresses to authenticate nodes with each other. This can be useful for development and testing, or within a secure private network, but should never be used for production with publicly accessible servers. To disable secure connections, configure replication on an insecure port, either by [configuring the operations API](../../deployments/configuration) to run on an insecure port or by configuring replication itself to run on an insecure port, and then set up IP-based routes to connect to other nodes:
+
+```yaml
+replication:
+  port: 9933
+  routes:
+    - 127.0.0.2
+    - 127.0.0.3
+```
+
+Note that in this example, we are using loopback addresses, which can be a convenient way to run multiple nodes on a single machine for testing and development.
+
+#### Explicit Subscriptions
+
+#### Managing Node Connections and Subscriptions in Harper
+
+By default, Harper automatically handles connections and subscriptions between nodes, ensuring data consistency across your cluster. It even uses data routing to manage node failures. But if you want more control, you can manage these connections manually by explicitly subscribing to nodes. This is useful for advanced configurations, testing, or debugging.
+
+#### Important Notes on Explicit Subscriptions
+
+If you choose to manage subscriptions manually, Harper will no longer handle data consistency for you. This means there’s no guarantee that all nodes will have consistent data if subscriptions don’t fully replicate in all directions. If a node goes down, it’s possible that some data wasn’t replicated before the failure.
+
+#### How to Subscribe to Nodes
+
+To explicitly subscribe to a node, you can use operations like `add_node` and define the subscriptions. For example, you can configure a node (e.g., `server-two`) to publish transactions on a specific table (e.g., `dev.my-table`) without receiving data from that node.
+
+Example configuration:
+
+```json
+{
+  "operation": "add_node",
+  "hostname": "server-two",
+  "subscriptions": [
+    {
+      "database": "dev",
+      "table": "my-table",
+      "publish": true,
+      "subscribe": false
+    }
+  ]
+}
+```
+
+To update an explicit subscription, you can use the [`update_node` operation](../operations-api/clustering).
+
+Here we are updating the subscription to receive transactions on the `dev.my-table` table from the `server-two` node.
+
+```json
+{
+  "operation": "update_node",
+  "hostname": "server-two",
+  "subscriptions": [
+    {
+      "database": "dev",
+      "table": "my-table",
+      "publish": true,
+      "subscribe": true
+    }
+  ]
+}
+```
+
+#### Monitoring Replication
+
+You can monitor the status of replication through the operations API. You can use the [`cluster_status` operation](../operations-api/clustering) to get the status of replication. For example:
+
+```json
+{
+  "operation": "cluster_status"
+}
+```
+
+#### Database Initial Synchronization and Resynchronization
+
+When a new node is added to the cluster, if its database has not previously been synced, it will initially download the database from the first node it connects to. This will copy every record from the source database to the new node. Once the initial synchronization is complete, the new node will enter replication mode and receive records from each node as they are created, updated, or deleted. If a node goes down and comes back up, it will also resynchronize with the other nodes in the cluster, to ensure that it has the most up-to-date data.
+
+You may also specify a `start_time` in the `add_node` operation to indicate that when a database connects, it should not download the entire database, but only data since the given starting time.
+
+**Advanced Configuration**
+
+You can also check the configuration of the replication system, including the currently known nodes and certificates, by querying the `hdb_nodes` and `hdb_certificate` tables:
+
+```json
+{
+  "operation": "search_by_value",
+  "database": "system",
+  "table": "hdb_nodes",
+  "search_attribute": "name",
+  "search_value": "*"
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/replication/sharding.md b/site/versioned_docs/version-4.6/developers/replication/sharding.md
new file mode 100644
index 00000000..84197445
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/replication/sharding.md
@@ -0,0 +1,165 @@
+---
+title: Sharding
+---
+
+Harper's replication system supports various levels of replication or sharding. Harper can be configured to replicate different data to different subsets of nodes. This can be used to facilitate horizontal scalability of storage and write performance, while maintaining optimal strategies for data locality and data consistency. When sharding is configured, Harper will replicate data to only a subset of nodes, based on the sharding configuration, and can then retrieve data from the appropriate nodes as needed to fulfill requests for data.
+
+There are two main ways to set up sharding in Harper. The first approach is dynamic sharding, where the location or residency of records is determined dynamically based on where the record was written and on the record's data, and records can be dynamically relocated based on where they are accessed. This residency information can be specific to each record, and can vary based on the computed residency and where the data is written and accessed.
+
+The second approach is to define specific shards, where each node is assigned to a specific shard, and each record is replicated to the nodes in that shard based on the primary key, regardless of where the data was written or accessed, or of its content. This approach is more static, but can be more efficient for certain use cases, and means that the location of data can always be predictably determined from the primary key.
+
+## Configuration For Dynamic Sharding
+
+By default, Harper will replicate all data to all nodes. However, replication can easily be configured for "sharding", or storing different data in different locations or nodes. The simplest way to configure sharding and limit replication, to improve performance and efficiency, is to configure a replicate-to count. This will limit the number of nodes that data is replicated to. For example, to specify that writes should replicate to 2 other nodes besides the node that first stored the data, you can set `replicateTo` to 2 in the `replication` section of the `harperdb-config.yaml` file:
+
+```yaml
+replication:
+  replicateTo: 2
+```
+
+This will ensure that data is replicated to two other nodes, so that each record will be stored on three nodes in total.
+
+With a sharding configuration (or customization, below) in place, requests for records that don't reside on the server handling the request will automatically be forwarded to the appropriate node. This is done transparently, so that the client does not need to know where the data is stored.
+
+## Replication Control with Headers
+
+With the REST interface, replication levels and destinations can also be specified with the `X-Replicate-To` header. This can be used to indicate the number of additional nodes that data should be replicated to, or to specify the nodes that data should be replicated to. The `X-Replicate-To` header can be used with the `POST` and `PUT` methods. This header can also specify whether the response should wait for confirmation from other nodes, and how many, with the `confirm` parameter. For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following header:
+
+```http
+PUT /MyTable/3
+X-Replicate-To: 2;confirm=1
+
+...
+```
+
+You can also explicitly specify destination nodes by providing a comma-separated list of node hostnames. For example, to specify that data should be replicated to nodes `node1` and `node2`, you can use the following header:
+
+```http
+PUT /MyTable/3
+X-Replicate-To: node1,node2
+```
+
+(This can also be used with the `confirm` parameter.)
+
+## Replication Control with Operations
+
+Likewise, you can specify `replicateTo` and `replicatedConfirmation` parameters in the operation object when using the Harper API. For example, to specify that data should be replicated to two other nodes, and the response should be returned once confirmation is received from one other node, you can use the following operation object:
+
+```json
+{
+  "operation": "update",
+  "schema": "dev",
+  "table": "MyTable",
+  "hashValues": [3],
+  "record": {
+    "name": "John Doe"
+  },
+  "replicateTo": 2,
+  "replicatedConfirmation": 1
+}
+```
+
+or you can specify nodes:
+
+```json
+...,
+  "replicateTo": ["node-1", "node-2"]
+...
+```
+
+## Programmatic Replication Control
+
+Additionally, you can specify `replicateTo` and `replicatedConfirmation` parameters programmatically in the context of a resource.
For example, you can define a put method:
+
+```javascript
+class MyTable extends tables.MyTable {
+  put(record) {
+    const context = this.getContext();
+    context.replicateTo = 2; // or an array of node names
+    context.replicatedConfirmation = 1;
+    return super.put(record);
+  }
+}
+```
+
+## Configuration for Static Sharding
+
+Alternatively, you can configure static sharding, where each node is assigned to a specific shard, and each record is replicated to the nodes in that shard based on the primary key. The shard is identified by a number. To configure the shard for each node, you can specify the shard number in the `shard` setting of the `replication` section of the configuration:
+
+```yaml
+replication:
+  shard: 1
+```
+
+Alternatively, you can configure the `shard` under the `replication` `routes`. This allows you to assign a specific shard id based on the routing configuration:
+
+```yaml
+replication:
+  routes:
+    - hostname: node1
+      shard: 1
+    - hostname: node2
+      shard: 2
+```
+
+Or you can specify a `shard` number by including that property in an `add_node` or `set_node` operation, to dynamically assign a node to a shard.
+
+You can then return a shard number from the `setResidency` or `setResidencyById` functions described below.
+
+## Custom Sharding
+
+You can also define a custom sharding strategy by specifying a function to compute the "residency", or location, of where records should be stored and reside. To do this, we use the `setResidency` method, providing a function that will determine the residency of each record. The function you provide will be called with the record entry, and should return an array of nodes that the record should be replicated to (using their hostnames). For example, to shard records based on the value of the `id` field, you can use the following code:
+
+```javascript
+MyTable.setResidency((record) => {
+  return record.id % 2 === 0 ? ['node1'] : ['node2'];
+});
+```
+
+With this approach, the record metadata, which includes the residency information, and any indexed properties will be replicated to all nodes, but the full record will only be replicated to the nodes specified by the residency function.
+
+The `setResidency` function can alternately return a shard number, which will replicate the data to all the nodes in that shard:
+
+```javascript
+MyTable.setResidency((record) => {
+  return record.id % 2 === 0 ? 1 : 2;
+});
+```
+
+### Custom Sharding By Primary Key
+
+Alternately, you can define a custom sharding strategy based on the primary key alone. This allows records to be retrieved without needing access to the record data or metadata. With this approach, data will only be replicated to the nodes specified by the residency function (the record metadata doesn't need to be replicated to all nodes). To do this, you can use the `setResidencyById` method, providing a function that will determine the residency or shard of each record based on the primary key. The function you provide will be called with the primary key, and should return a `shard` number or an array of nodes that the record should be replicated to (using their hostnames). For example, to shard records based on the value of the primary key, you can use the following code:
+
+```javascript
+MyTable.setResidencyById((id) => {
+  return id % 2 === 0 ? 1 : 2; // return shard number
+});
+```
+
+or
+
+```javascript
+MyTable.setResidencyById((id) => {
+  return id % 2 === 0 ? ['node1'] : ['node2']; // return array of node hostnames
+});
+```
+
+### Disabling Cross-Node Access
+
+Normally sharding allows data to be stored on specific nodes, while still allowing access to the data from any node. However, you can also disable cross-node access so that data is only returned if it is stored on the node where it is accessed. To do this, you can set the `replicateFrom` property on the context of the operation to `false`:
+
+```json
+{
+  "operation": "search_by_id",
+  "table": "MyTable",
+  "ids": [3],
+  "replicateFrom": false
+}
+```
+
+Or use a header with the REST API:
+
+```http
+GET /MyTable/3
+X-Replicate-From: none
+```
diff --git a/site/versioned_docs/version-4.6/developers/rest.md b/site/versioned_docs/version-4.6/developers/rest.md
new file mode 100644
index 00000000..2ebed3f4
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/rest.md
@@ -0,0 +1,404 @@
+---
+title: REST
+---
+
+# REST
+
+## REST
+
+Harper provides a powerful, efficient, and standards-compliant HTTP REST interface for interacting with tables and other resources. The REST interface is the recommended interface for data access, querying, and manipulation (for HTTP interactions), providing the best performance and HTTP interoperability with different clients.
+
+Resources, including tables, can be configured as RESTful endpoints. Make sure you review the [application introduction](./applications/) and [defining schemas](./applications/defining-schemas) to properly define your schemas and select which tables are exported and available through the REST interface, as tables are not exported by default. The name of the [exported](./applications/defining-schemas#export) resource defines the basis of the endpoint path available at the application HTTP server port [configured here](../deployments/configuration#http) (the default being `9926`). From there, a record id or query can be appended. Following uniform interface principles, HTTP methods define different actions with resources. For each method, this describes the default action.
+
+The default path structure provides access to resources at several levels:
+
+- `/my-resource` - The root path of a resource usually has a description of the resource (like a describe operation for a table).
+- `/my-resource/` - The trailing slash in a path indicates it is a collection of the records. The root collection for a table represents all the records in a table, and usually you will append query parameters to query and search for more specific records.
+- `/my-resource/record-id` - This resource locator represents a specific record, referenced by its id. This is typically how you can retrieve, update, and delete individual records.
+- `/my-resource/record-id/` - Again, a trailing slash indicates a collection; here it is the collection of the records that begin with the specified id prefix.
+- `/my-resource/record-id/with/multiple/parts` - A record id can consist of multiple path segments.
+
+### GET
+
+`GET` requests can be used to retrieve individual records or perform searches. This is handled by the Resource method `get()` (and can be overridden).
+
+#### `GET /my-resource/<record-id>`
+
+This can be used to retrieve a record by its primary key. The response will include the record as the body.
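+
+As a quick illustration, here is a sketch of retrieving a record with `fetch` from a browser or Node.js client (the server address and `my-resource` table are hypothetical):
+
+```javascript
+// Fetch a single record by primary key from an exported table.
+const response = await fetch('http://server:9926/my-resource/341', {
+  headers: { Accept: 'application/json' },
+});
+const record = await response.json();
+```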
**Caching/Conditional Requests**
+
+A `GET` response for a record will include an `ETag` response header encoding the version (a timestamp of the last modification) of this record (or of any accessed record when used in a custom get method). On subsequent requests, a client (that has a cached copy) may include an `If-None-Match` request header with this tag. If the record has not been updated since that time, the response will have a 304 status and no body. This facilitates significant performance gains, since the response data doesn't need to be serialized and transferred over the network.
+
+#### `GET /my-resource/?property=value`
+
+This can be used to search for records by the specified property name and value. See the querying section for more information.
+
+#### `GET /my-resource/<record-id>/.property`
+
+This can be used to retrieve the specified property of the specified record. Note that this will only work for properties that are declared in the schema.
+
+### PUT
+
+This can be used to create or update a record with the provided object/data (similar to an "upsert") with a specified key. This is handled by the Resource method `put(record)`.
+
+#### `PUT /my-resource/<record-id>`
+
+This will create or update the record at the URL path that maps to the record's primary key. The record will be replaced with the contents of the data in the request body. The new record will exactly match the data that was sent (this will remove any properties that were present in the previous record and not included in the body). Future GETs will return the exact data that was provided by PUT (what you PUT is what you GET). For example:
+
+```http
+PUT /MyTable/123
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create or replace the record with a primary key of "123" with the object defined by the JSON in the body. This is handled by the Resource method `put()`.
+
+### DELETE
+
+This can be used to delete a record or records.
+
+#### `DELETE /my-resource/<record-id>`
+
+This will delete a record with the given primary key. This is handled by the Resource's `delete` method. For example:
+
+```http
+DELETE /MyTable/123
+```
+
+This will delete the record with the primary key of "123".
+
+#### `DELETE /my-resource/?property=value`
+
+This will delete all the records that match the provided query.
+
+### POST
+
+Generally, the POST method can be used for custom actions, since POST has the broadest semantics. For tables that are exposed as endpoints, it can also be used to create new records.
+
+#### `POST /my-resource/`
+
+This is handled by the Resource method `post(data)`, which is a good method to extend to make various other types of modifications. Also, with a table you can create a new record without specifying a primary key, for example:
+
+```http
+POST /MyTable/
+Content-Type: application/json
+
+{ "name": "some data" }
+```
+
+This will create a new record, auto-assigning a primary key, which will be returned in the `Location` header.
+
+### Querying through URL query parameters
+
+URL query parameters provide a powerful language for specifying database queries in Harper. The simplest form is to search by a single attribute name and value, to find all records with the given value for that property/attribute. It is important to note that an attribute must be configured to be indexed in order to search on it. For example:
+
+```http
+GET /my-resource/?property=value
+```
+
+We can specify multiple properties that must match:
+
+```http
+GET /my-resource/?property=value&property2=another-value
+```
+
+Note that only one of the attributes needs to be indexed for this query to execute.
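+
+A sketch of issuing such a query from client-side JavaScript, URL-encoding the values (the server address, table, and attribute names are hypothetical):
+
+```javascript
+// Search an exported table by two attributes; values are URL-encoded.
+const query = `property=${encodeURIComponent('value')}&property2=${encodeURIComponent('another-value')}`;
+const response = await fetch(`http://server:9926/my-resource/?${query}`);
+const records = await response.json(); // array of matching records
+```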
+
+We can also specify different comparators, such as less-than and greater-than queries, using [FIQL](https://datatracker.ietf.org/doc/html/draft-nottingham-atompub-fiql-00) syntax. If we want to specify records with an `age` value greater than 20:
+
+```http
+GET /my-resource/?age=gt=20
+```
+
+Or less than or equal to 20:
+
+```http
+GET /my-resource/?age=le=20
+```
+
+The comparison operators include the standard FIQL operators: `lt` (less than), `le` (less than or equal), `gt` (greater than), `ge` (greater than or equal), and `ne` (not equal). These comparison operators can also be combined with other query parameters with `&`. For example, if we wanted products with a category of software and a price between 100 and 200, we could write:
+
+```http
+GET /Product/?category=software&price=gt=100&price=lt=200
+```
+
+Comparison operators can also be used on Date fields; however, we have to ensure that the date format is properly escaped. For example, if we are looking for a listing date greater than `2017-03-08T09:30:00.000Z`, we must escape the colons as `%3A`:
+
+```http
+GET /Product/?listDate=gt=2017-03-08T09%3A30%3A00.000Z
+```
+
+You can also search for attributes that start with a specific string, by using the `==` comparator and appending a `*` to the attribute value:
+
+```http
+GET /Product/?name==Keyboard*
+```
+
+**Chained Conditions**
+
+You can also specify that a range condition must be met for a single attribute value by chaining conditions. This is done by omitting the name in the name-value pair. For example, to find products with a price between 100 and 200, you could write:
+
+```http
+GET /Product/?price=gt=100&=lt=200
+```
+
+Chaining can be used to combine `gt` or `ge` with `lt` or `le` to specify a range of values. Currently, no other types of chaining are supported.
+
+Note that some HTTP clients may be overly aggressive in encoding query parameters, and you may need to disable extra encoding to ensure operators are passed through without manipulation.
+
+Here is a full list of the supported FIQL-style operators/comparators:
+
+- `==`: equal
+- `=lt=`: less than
+- `=le=`: less than or equal
+- `=gt=`: greater than
+- `=ge=`: greater than or equal
+- `=ne=`, `!=`: not equal
+- `=ct=`: contains the value (for strings)
+- `=sw=`, `==*`: starts with the value (for strings)
+- `=ew=`: ends with the value (for strings)
+- `=`, `===`: strict equality (no type conversion)
+- `!==`: strict inequality (no type conversion)
+
+#### Unions
+
+Conditions can also be applied with `OR` logic, returning the union of records that match either condition. This can be specified by using the `|` operator instead of `&`. For example, to return any product with a rating of `5` _or_ a `featured` attribute that is `true`, we could write:
+
+```http
+GET /Product/?rating=5|featured=true
+```
+
+#### Grouping of Operators
+
+Multiple conditions with different operators can be combined with grouping of conditions to indicate the order of operation. Grouping conditions can be done with parentheses, with standard grouping conventions as used in query and mathematical expressions. For example, a query to find products with a rating of 5 OR a price between 100 and 200 could be written:
+
+```http
+GET /Product/?rating=5|(price=gt=100&price=lt=200)
+```
+
+Grouping conditions can also be done with square brackets, which function the same as parentheses for grouping conditions.
The advantage of using square brackets is that you can include user-provided values that might have parentheses in them, and use standard URI component encoding functionality, which will safely escape/encode square brackets, but not parentheses. For example, if we were constructing a query for products with a rating of 5 and matching one of a set of user-provided tags, a query could be built like:
+
+```http
+GET /Product/?rating=5&[tag=fast|tag=scalable|tag=efficient]
+```
+
+And the tags could be safely generated from user inputs in a tag array like:
+
+```javascript
+let url = `/Product/?rating=5&[${tags.map(encodeURIComponent).join('|')}]`;
+```
+
+More complex queries can be created by further nesting groups:
+
+```http
+GET /Product/?price=lt=100|[rating=5&[tag=fast|tag=scalable|tag=efficient]&inStock=true]
+```
+
+### Query Calls
+
+Harper has several special query functions that use "call" syntax. These can be included in the query string as their own query entries (separated from other query conditions with an `&`). These include:
+
+#### `select(properties)`
+
+This function allows you to specify which properties should be included in the responses. This takes several forms:
+
+- `?select(property)`: This will return the values of the specified property directly in the response (they will not be put in an object).
+- `?select(property1,property2)`: This returns the records as objects, but limited to the specified properties.
+- `?select([property1,property2,...])`: This returns the records as arrays of the property values for the specified properties.
+- `?select(property1,)`: This can be used to specify that objects should be returned with the single specified property.
+- `?select(property{subProperty1,subProperty2{subSubProperty,..}},...)`: This can be used to specify which sub-properties should be included in nested objects and joined/referenced records.
+
+To get a list of product names with a category of software:
+
+```http
+GET /Product/?category=software&select(name)
+```
+
+#### `limit(start,end)` or `limit(end)`
+
+This function specifies a limit on the number of records returned, optionally providing a starting offset.
+
+For example, to find the first twenty records with a `rating` greater than 3, `inStock` equal to true, only returning the `rating` and `name` properties, you could use:
+
+```http
+GET /Product/?rating=gt=3&inStock=true&select(rating,name)&limit(20)
+```
+
+#### `sort(property)`, `sort(+property,-property,...)`
+
+This function allows you to indicate the sort order for the returned results. The argument for `sort()` is one or more properties that should be used to sort. If the property is prefixed with '+', or has no prefix, the sort will be performed in ascending order by the indicated attribute/property. If the property is prefixed with '-', it will be sorted in descending order. If multiple properties are specified, the sort will be performed on the first property, and for records with the same value for that property, the next property will be used to break the tie and sort those results. This tie-breaking will continue through any provided properties.
+
+For example, to sort by product name (in ascending order):
+
+```http
+GET /Product?rating=gt=3&sort(+name)
+```
+
+To sort by rating in ascending order, then by price in descending order for products with the same rating:
+
+```http
+GET /Product?sort(+rating,-price)
+```
+
+## Relationships
+
+Harper supports relationships in its data models, allowing a table to define relationships with data from other tables (or even itself) through foreign keys. These relationships can be one-to-many, many-to-one, or many-to-many (and can even be ordered relationships). These relationships are defined in the schema, and can then easily be queried through chained attributes that act as "join" queries, allowing related attributes to be referenced in conditions and selected for returned results.
+
+### Chained Attributes and Joins
+
+To support relationships and hierarchical data structures, in addition to querying on top-level attributes, you can also query on chained attributes. Most importantly, this provides Harper's "join" functionality, allowing related tables to be queried and joined in the results. Chained properties are specified by using dot syntax. In order to effectively leverage join functionality, you need to define a relationship in your schema:
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  name: String
+  brandId: ID @indexed
+  brand: Brand @relationship(from: "brandId")
+}
+type Brand @table @export {
+  id: ID @primaryKey
+  name: String
+  products: [Product] @relationship(to: "brandId")
+}
+```
+
+And then you could query a product by brand name:
+
+```http
+GET /Product/?brand.name=Microsoft
+```
+
+This will query for products for which the `brandId` references a `Brand` record with a `name` of `"Microsoft"`.
+
+The `brand` attribute in `Product` is a "computed" attribute from the foreign key (`brandId`), for the many-to-one relationship to the `Brand`. In the schema above, we also defined the reverse one-to-many relationship from a `Brand` to a `Product`, and we could likewise query that:
+
+```http
+GET /Brand/?products.name=Keyboard
+```
+
+This would return any `Brand` with at least one product with a name of `"Keyboard"`. Note that both of these queries are effectively acting as an "INNER JOIN".
+
+#### Chained/Nested Select
+
+Computed relationship attributes are not included by default in query results. However, we can include them by specifying them in a select:
+
+```http
+GET /Product/?brand.name=Microsoft&select(name,brand)
+```
+
+We can also do a "nested" select and specify which sub-attributes to include. For example, if we only wanted to include the name property from the brand, we could do so:
+
+```http
+GET /Product/?brand.name=Microsoft&select(name,brand{name})
+```
+
+Or to specify multiple sub-attributes, we can comma-delimit them. Note that selects can "join" to another table without any constraint/filter on the related/joined table:
+
+```http
+GET /Product/?name=Keyboard&select(name,brand{name,id})
+```
+
+When selecting properties from a related table without any constraints on the related table, this effectively acts like a "LEFT JOIN" and will omit the `brand` property if the `brandId` is `null` or references a non-existent brand.
+
+#### Many-to-many Relationships (Array of Foreign Keys)
+
+Many-to-many relationships are also supported, and can easily be created using an array of foreign key values, without requiring the traditional use of a junction table.
This can be done by simply creating a relationship on an array-typed property that references a local array of foreign keys. For example, we could create a relationship to the resellers of a product (each product can have multiple resellers, and each reseller can sell multiple products):
+
+```graphql
+type Product @table @export {
+  id: ID @primaryKey
+  name: String
+  resellerIds: [ID] @indexed
+  resellers: [Reseller] @relationship(from: "resellerIds")
+}
+type Reseller @table {
+  id: ID @primaryKey
+  name: String
+  ...
+}
+```
+
+The product record can then hold an array of the reseller ids. When the `resellers` property is accessed (either through code or through selects and conditions), the array of ids is resolved to an array of reseller records. We can also query through the resellers relationship just like the other relationships. For example, to query the products that are available through the "Cool Shop":
+
+```http
+GET /Product/?resellers.name=Cool Shop&select(id,name,resellers{name,id})
+```
+
+One of the benefits of using an array of foreign key values is that it can be manipulated using standard array methods (in JavaScript), and the array can dictate an order for the keys, and therefore for the resulting records. For example, you may wish to define a specific order for the resellers and how they are listed (which comes first, which comes last):
+
+```http
+PUT /Product/123
+Content-Type: application/json
+
+{ "id": "123", "resellerIds": ["first-reseller-id", "second-reseller-id", "last-reseller-id"],
+...}
+```
+
+#### Type Conversion
+
+Query parameters are simply text, so there are several features for converting parameter values to properly typed values for performing correct searches. For the FIQL comparators, which include `==`, `!=`, `=gt=`, `=lt=`, `=ge=`, and `=le=`, the parser will perform type conversion, according to the following rules:
+
+- `name==null`: Will convert the value to `null` for searching.
+- `name==123`: Will convert the value to a number _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`).
+- `name==true`: Will convert the value to a boolean _if_ the attribute is untyped (there is no type specified in a GraphQL schema, or the type is specified to be `Any`).
+- `name==number:123`: Will explicitly convert the value after "number:" to a number.
+- `name==boolean:true`: Will explicitly convert the value after "boolean:" to a boolean.
+- `name==string:some%20text`: Will explicitly keep the value after "string:" as a string (and perform URL component decoding).
+- `name==date:2024-01-05T20%3A07%3A27.955Z`: Will explicitly convert the value after "date:" to a Date object.
+
+If the attribute specifies a type (like `Float`) in the schema definition, the value will always be converted to the specified type before searching.
+
+For "strict" operators, which include `=`, `===`, and `!==`, no automatic type conversion will be applied; the value will be decoded as a string with URL component decoding, with type conversion applied only if the attribute specifies a type, in which case the attribute type will determine the conversion.
+
+#### Content Types and Negotiation
+
+HTTP defines a couple of headers for indicating the (preferred) content type of the request and response. The `Content-Type` request header can be used to specify the content type of the request body (for PUT, PATCH, and POST). The `Accept` request header indicates the preferred content type of the response.
+
+#### Content Types and Negotiation
+
+HTTP defines a couple of headers for indicating the (preferred) content type of the request and response. The `Content-Type` request header can be used to specify the content type of the request body (for PUT, PATCH, and POST). The `Accept` request header indicates the preferred content type of the response.
+
+For general records with object structures, Harper supports the following content types:
+
+- `application/json` - Common format, easy to read, with great tooling support.
+- `application/cbor` - Recommended binary format for optimal encoding efficiency and performance.
+- `application/x-msgpack` - Also an efficient format, but CBOR is preferable, as it has better streaming capabilities and faster time-to-first-byte.
+- `text/csv` - CSV lacks explicit typing and is not well suited for heterogeneous data structures, but it is good for moving data to and from a spreadsheet.
+
+CBOR is generally the most efficient and powerful encoding format, with the best performance, most compact encoding, and the most expansive ability to encode different data types like Dates, Maps, and Sets. MessagePack is very similar and tends to have broader adoption. However, JSON can be easier to work with and may have better tooling. Also, if you are using compression for data transfer (gzip or brotli), JSON will often result in more compact compressed data due to character frequencies that better align with Huffman coding, making JSON a good choice for web applications that do not require specific data types beyond the standard JSON types.
+
+Requesting a specific content type can also be done in a URL by suffixing the path with the extension for the content type. If you want to retrieve a record in CSV format, you could request:
+
+```http
+GET /product/some-id.csv
+```
+
+Or you could request a query response in MessagePack:
+
+```http
+GET /product/.msgpack?category=software
+```
+
+However, it is generally not recommended that you use extensions in paths; it is best practice to use the `Accept` header to specify acceptable content types.
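+
+For example, the same record can be requested in CSV format through content negotiation instead of a path extension:
+
+```http
+GET /product/some-id
+Accept: text/csv
+```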
+
+#### Specific Content Objects
+
+You can specify other content types, and the data will be stored as a record or object that holds the type and contents of the data. For example, if you do:
+
+```
+PUT /my-resource/33
+Content-Type: text/calendar
+
+BEGIN:VCALENDAR
+VERSION:2.0
+...
+```
+
+This would store a record equivalent to the JSON:
+
+```
+{ "contentType": "text/calendar", "data": "BEGIN:VCALENDAR\nVERSION:2.0\n..." }
+```
+
+Retrieving a record with `contentType` and `data` properties will likewise return a response with the specified `Content-Type` and body. If the `Content-Type` is not of the `text` family, the data will be treated as binary data (a Node.js `Buffer`).
+
+You can also use `application/octet-stream` to indicate that the request body should be preserved in binary form. This is also useful for uploading to a specific property:
+
+```
+PUT /my-resource/33/image
+Content-Type: image/gif
+
+...image data...
+```
diff --git a/site/versioned_docs/version-4.6/developers/security/basic-auth.md b/site/versioned_docs/version-4.6/developers/security/basic-auth.md
new file mode 100644
index 00000000..6e3dac3a
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/basic-auth.md
@@ -0,0 +1,57 @@
+---
+title: Basic Authentication
+---
+
+# Basic Authentication
+
+Harper uses Basic Auth and JSON Web Tokens (JWTs) to secure our HTTP requests. In the context of an HTTP transaction, **basic access authentication** is a method for an HTTP user agent to provide a username and password when making a request.
+
+**_You do not need to log in separately. Basic Auth is added to each HTTP request like create_database, create_table, insert, etc. via headers._**
+
+A header is added to each HTTP request. The header key is **“Authorization”** and the header value is **“Basic <<your username and password buffer token>>”**.
+
+## Authentication in Harper Studio
+
+In the code sample below, you can see where we add the authorization header to the request. This header needs to be added to each and every HTTP request sent to Harper.
+
+_Note: This function uses btoa. Learn about_ [_btoa here_](https://developer.mozilla.org/en-US/docs/Web/API/btoa)_._
+
+```javascript
+const http = require('node:http');
+
+// Helper: returns true if the response body contains parseable JSON.
+function isJson(body) {
+	try {
+		JSON.parse(body);
+		return true;
+	} catch (e) {
+		return false;
+	}
+}
+
+function callHarperDB(call_object, operation, callback) {
+	const options = {
+		method: 'POST',
+		hostname: call_object.endpoint_url,
+		port: call_object.endpoint_port,
+		path: '/',
+		headers: {
+			'content-type': 'application/json',
+			// Basic auth: base64-encoded "username:password"
+			'authorization': 'Basic ' + btoa(call_object.username + ':' + call_object.password),
+			'cache-control': 'no-cache',
+		},
+	};
+
+	const http_req = http.request(options, function (hdb_res) {
+		let chunks = [];
+
+		hdb_res.on('data', function (chunk) {
+			chunks.push(chunk);
+		});
+
+		hdb_res.on('end', function () {
+			const body = Buffer.concat(chunks);
+			if (isJson(body)) {
+				return callback(null, JSON.parse(body));
+			} else {
+				return callback(body, null);
+			}
+		});
+	});
+
+	http_req.on('error', function (chunk) {
+		return callback('Failed to connect', null);
+	});
+
+	http_req.write(JSON.stringify(operation));
+	http_req.end();
+}
+```
diff --git a/site/versioned_docs/version-4.6/developers/security/certificate-management.md b/site/versioned_docs/version-4.6/developers/security/certificate-management.md
new file mode 100644
index 00000000..fdc8cc22
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/certificate-management.md
@@ -0,0 +1,74 @@
+---
+title: Certificate Management
+---
+
+# Certificate Management
+
+This document covers managing certificates for Harper's external-facing APIs. For information on certificate management for clustering, see [clustering certificate management](../clustering/certificate-management).
+
+## Development
+
+An out of the box install of Harper does not have HTTPS enabled (see [configuration](../../deployments/configuration#http) for relevant configuration file settings). This is great for local development. If you are developing using a remote server and your requests are traversing the Internet, we recommend that you enable HTTPS.
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper.
+
+By default Harper will generate certificates and place them at `/keys/`. These certificates will not have a valid Common Name (CN) for your Harper node, so you will be able to use HTTPS, but your HTTPS client must be configured to accept the invalid certificate.
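+
+For example, with curl you can skip certificate verification while developing against these self-generated certificates (the port and path here are illustrative; use whatever you set `http.securePort` to):
+
+```bash
+curl --insecure https://localhost:9926/Product/some-id
+```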
+
+## Production
+
+For production deployments, in addition to using HTTPS, we recommend using your own certificate authority (CA) or a public CA, such as Let's Encrypt, to generate certificates with CNs that match the Fully Qualified Domain Name (FQDN) of your Harper node.
+
+We have a few recommended options for enabling HTTPS in a production setting.
+
+### Option: Enable Harper HTTPS and Replace Certificates
+
+To enable HTTPS, set `http.securePort` in `harperdb-config.yaml` to the port you wish to use for HTTPS connections and restart Harper.
+
+To replace the certificates, either replace the contents of the existing certificate files at `/keys/`, or update the Harper configuration with the path of your new certificate files, and then restart Harper.
+
+```yaml
+tls:
+  certificate: ~/hdb/keys/certificate.pem
+  privateKey: ~/hdb/keys/privateKey.pem
+```
+
+The `operationsApi.tls` configuration is optional. If it is not set, Harper will default to the values in the `tls` section.
+
+```yaml
+operationsApi:
+  tls:
+    certificate: ~/hdb/keys/certificate.pem
+    privateKey: ~/hdb/keys/privateKey.pem
+```
+
+### mTLS
+
+Mutual TLS (mTLS) is a security protocol that requires both the client and the server to present certificates to each other. Requiring a client certificate can be useful for authenticating clients and ensuring that only authorized clients can access your Harper instance. This can be enabled by setting the `http.mtls` configuration in `harperdb-config.yaml` to `true` and providing a certificate authority in the `tls` section:
+
+```yaml
+http:
+  mtls: true
+  ...
+tls:
+  certificateAuthority: ~/hdb/keys/ca.pem
+  ...
+```
+
+### Option: Nginx Reverse Proxy
+
+Instead of enabling HTTPS for Harper, Nginx can be used as a reverse proxy for Harper.
+
+Install Nginx, configure it to use certificates issued by your own CA or a public CA, then configure it to listen for HTTPS requests and forward them to Harper as HTTP requests.
+
+[Certbot](https://certbot.eff.org/) is a great tool for automatically requesting and renewing Let's Encrypt certificates used by Nginx.
+
+### Option: External Reverse Proxy
+
+Instead of enabling HTTPS for Harper, a number of different external services can be used as a reverse proxy for Harper. These services typically have integrated certificate management. Configure the service to listen for HTTPS requests and forward them (over a private network) to Harper as HTTP requests.
+
+Examples of these types of services include an AWS Application Load Balancer or a GCP external HTTP(S) load balancer.
+
+### Additional Considerations
+
+It is possible to use different certificates for the Operations API and the Custom Functions API. In scenarios where only your Custom Functions endpoints need to be exposed to the Internet and the Operations API is reserved for Harper administration, you may want to use a private CA to issue certificates for the Operations API and a public CA for the Custom Functions API certificates.
diff --git a/site/versioned_docs/version-4.6/developers/security/configuration.md b/site/versioned_docs/version-4.6/developers/security/configuration.md
new file mode 100644
index 00000000..de30868c
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/configuration.md
@@ -0,0 +1,40 @@
+---
+title: Configuration
+---
+
+# Configuration
+
+Harper was set up to require very minimal configuration to work out of the box. There are, however, some best practices we encourage for anyone building an app with Harper.
+
+## CORS
+
+Harper allows for managing [cross-origin HTTP requests](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS). By default, Harper enables CORS for all domains. If you need to disable CORS completely or set up an access list of domains, you can do the following:
+
+1. Open the harperdb-config.yaml file, which can be found in the location you specified during install.
+1. In harperdb-config.yaml there should be 2 entries under `operationsApi.network`: `cors` and `corsAccessList`.
+   - `cors`
+     1. To turn off, change to: `cors: false`
+     1. To turn on, change to: `cors: true`
+   - `corsAccessList`
+     1. The `corsAccessList` will only be recognized by the system when `cors` is `true`.
+     1. To create an access list, set `corsAccessList` to a comma-separated list of domains, e.g. `corsAccessList` is `http://harpersystems.dev,http://products.harpersystems.dev` (see the sketch after this list).
+     1. To clear out the access list and allow all domains: `corsAccessList` is `[null]`.
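+
+As a sketch, an access list in `harperdb-config.yaml` might look like the following (the exact YAML list form is an assumption; consult your configuration reference for the authoritative syntax):
+
+```yaml
+operationsApi:
+  network:
+    cors: true
+    corsAccessList:
+      - http://harpersystems.dev
+      - http://products.harpersystems.dev
+```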
+
+## SSL
+
+Harper provides the option to use an HTTP or an HTTPS (with HTTP/2) interface. The default port for the server is 9925.
+
+This default port can be changed by updating the `operationsApi.network.port` value in `/harperdb-config.yaml`.
+
+By default, HTTPS is turned off and HTTP is turned on. It is recommended that you never directly expose Harper's HTTP interface through a publicly available port. HTTP is intended for local or private network use.
+
+You can toggle between HTTPS and HTTP in the settings file by setting `operationsApi.network.https` to true/false. When `https` is set to `false`, the server will use HTTP (version 1.1). Enabling HTTPS will enable both HTTP/1.1 and HTTP/2 over TLS.
+
+Harper automatically generates a certificate (certificate.pem), a certificate authority (ca.pem), and a private key file (privateKey.pem), which live at `/keys/`.
+
+You can replace these with your own certificates and key.
+
+**Changes to these settings require a restart; use the `restart` operation from the Harper Operations API or run `harperdb restart` from the command line.**
diff --git a/site/versioned_docs/version-4.6/developers/security/index.md b/site/versioned_docs/version-4.6/developers/security/index.md
new file mode 100644
index 00000000..6539f007
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/index.md
@@ -0,0 +1,13 @@
+---
+title: Security
+---
+
+# Security
+
+Harper uses role-based, attribute-level security to ensure that users can only gain access to the data they’re supposed to be able to access. Our granular permissions allow for unparalleled flexibility and control, and can actually lower the total cost of ownership compared to other database solutions, since you no longer have to replicate subsets of your data to isolate use cases.
+
+- [JWT Authentication](./jwt-auth)
+- [Basic Authentication](./basic-auth)
+- [mTLS Authentication](./mtls-auth)
+- [Configuration](./configuration)
+- [Users and Roles](./users-and-roles)
diff --git a/site/versioned_docs/version-4.6/developers/security/jwt-auth.md b/site/versioned_docs/version-4.6/developers/security/jwt-auth.md
new file mode 100644
index 00000000..570de46d
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/jwt-auth.md
@@ -0,0 +1,96 @@
+---
+title: JWT Authentication
+---
+
+# JWT Authentication
+
+Harper uses token-based authentication with JSON Web Tokens (JWTs).
+
+This consists of two primary operations, `create_authentication_tokens` and `refresh_operation_token`, which generate two types of tokens, as follows:
+
+- The `operation_token`, which is used to authenticate all Harper operations in the Bearer Token Authorization header. The default expiry is one day.
+- The `refresh_token`, which is used to generate a new `operation_token` upon expiry. This token is used in the Bearer Token Authorization header for the `refresh_operation_token` operation only. The default expiry is thirty days.
+
+The `create_authentication_tokens` operation can be used at any time to refresh both tokens in the event that both have expired or been lost.
+
+## Create Authentication Tokens
+
+Users must initially create tokens using their Harper credentials. The following POST body is sent to Harper. No headers are required for this POST operation.
+
+```json
+{
+	"operation": "create_authentication_tokens",
+	"username": "username",
+	"password": "password"
+}
+```
+
+A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+	"operation": "create_authentication_tokens",
+	"username": "username",
+	"password": "password"
+}'
+```
+
+An example expected return object is:
+
+```json
+{
+	"operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4",
+	"refresh_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60"
+}
+```
+
+## Using JWT Authentication Tokens
+
+The `operation_token` value is used to authenticate all operations in place of our standard Basic auth.
+In order to pass the token you will need to create a Bearer Token Authorization header, as in the following request:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDUwNjQ2MDAsInN1YiI6Im9wZXJhdGlvbiJ9.MpQA-9CMjA-mn-7mHyUXSuSC_-kqMqJXp_NDiKLFtbtMRbodCuY3DzH401rvy_4vb0yCELf0B5EapLVY1545sv80nxSl6FoZFxQaDWYXycoia6zHpiveR8hKlmA6_XTWHJbY2FM1HAFrdtt3yUTiF-ylkdNbPG7u7fRjTmHfsZ78gd2MNWIDkHoqWuFxIyqk8XydQpsjULf2Uacirt9FmHfkMZ-Jr_rRpcIEW0FZyLInbm6uxLfseFt87wA0TbZ0ofImjAuaW_3mYs-3H48CxP152UJ0jByPb0kHsk1QKP7YHWx1-Wce9NgNADfG5rfgMHANL85zvkv8sJmIGZIoSpMuU3CIqD2rgYnMY-L5dQN1fgfROrPMuAtlYCRK7r-IpjvMDQtRmCiNG45nGsM4DTzsa5GyDrkGssd5OBhl9gr9z9Bb5HQVYhSKIOiy72dK5dQNBklD4eGLMmo-u322zBITmE0lKaBcwYGJw2mmkYcrjDOmsDseU6Bf_zVUd9WF3FqwNkhg4D7nrfNSC_flalkxPHckU5EC_79cqoUIX2ogufBW5XgYbU4WfLloKcIpb51YTZlZfwBHlHPSyaq_guaXFaeCUXKq39_i1n0HRF_mRaxNru0cNDFT9Fm3eD7V8axFijSVAMDyQs_JR7SY483YDKUfN4l-vw-EVynImr4' \
+--data-raw '{
+	"operation":"search_by_hash",
+	"schema":"dev",
+	"table":"dog",
+	"hash_values":[1],
+	"get_attributes": ["*"]
+}'
+```
+
+## Token Expiration
+
+`operation_token` expires at a set interval. Once it expires, it will no longer be accepted by Harper. This duration defaults to one day and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token`, the `refresh_operation_token` operation is used, passing the `refresh_token` in the Bearer Token Authorization header. A full cURL example can be seen here:
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6InVzZXJuYW1lIiwiaWF0IjoxNjA0OTc4MjAwLCJleHAiOjE2MDc1NzAyMDAsInN1YiI6InJlZnJlc2gifQ.acaCsk-CJWIMLGDZdGnsthyZsJfQ8ihXLyE8mTji8PgGkpbwhs7e1O0uitMgP_pGjHq2tey1BHSwoeCL49b18WyMIB10hK-q2BXGKQkykltjTrQbg7VsdFi0h57mGfO0IqAwYd55_hzHZNnyJMh4b0iPQFDwU7iTD7x9doHhZAvzElpkWbc_NKVw5_Mw3znjntSzbuPN105zlp4Niurin-_5BnukwvoJWLEJ-ZlF6hE4wKhaMB1pWTJjMvJQJE8khTTvlUN8tGxmzoaDYoe1aCGNxmDEQnx8Y5gKzVd89sylhqi54d2nQrJ2-ElfEDsMoXpR01Ps6fNDFtLTuPTp7ixj8LvgL2nCjAg996Ga3PtdvXJAZPDYCqqvaBkZZcsiqOgqLV0vGo3VVlfrcgJXQImMYRr_Inu0FCe47A93IAWuQTs-KplM1KdGJsHSnNBV6oe6QEkROJT5qZME-8xhvBYvOXqp9Znwg39bmiBCMxk26Ce66_vw06MNgoa3D5AlXPWemfdVKPZDnj_aLVjZSs0gAfFElcVn7l9yjWJOaT2Muk26U8bJl-2BEq_DSclqKHODuYM5kkPKIdE4NFrsqsDYuGxcA25rlNETFyl0q-UXj1aoz_joy5Hdnr4mFELmjnoo4jYQuakufP9xeGPsj1skaodKl0mmoGcCD6v1F60' \
+--data-raw '{
+	"operation":"refresh_operation_token"
+}'
+```
+
+This will return a new `operation_token`.
+An example expected return object is:
+
+```json
+{
+	"operation_token": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6eyJfX2NyZWF0ZWR0aW1lX18iOjE2MDQ5NzgxODkxNTEsIl9fdXBkYXRlZHRpbWVfXyI6MTYwNDk3ODE4OTE1MSwiYWN0aXZlIjp0cnVlLCJyb2xlIjp7Il9fY3JlYXRlZHRpbWVfXyI6MTYwNDk0NDE1MTM0NywiX191cGRhdGVkdGltZV9fIjoxNjA0OTQ0MTUxMzQ3LCJpZCI6IjdiNDNlNzM1LTkzYzctNDQzYi05NGY3LWQwMzY3Njg5NDc4YSIsInBlcm1pc3Npb24iOnsic3VwZXJfdXNlciI6dHJ1ZSwic3lzdGVtIjp7InRhYmxlcyI6eyJoZGJfdGFibGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9hdHRyaWJ1dGUiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9zY2hlbWEiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl91c2VyIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfcm9sZSI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2pvYiI6eyJyZWFkIjp0cnVlLCJpbnNlcnQiOmZhbHNlLCJ1cGRhdGUiOmZhbHNlLCJkZWxldGUiOmZhbHNlLCJhdHRyaWJ1dGVfcGVybWlzc2lvbnMiOltdfSwiaGRiX2xpY2Vuc2UiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl9pbmZvIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119LCJoZGJfbm9kZXMiOnsicmVhZCI6dHJ1ZSwiaW5zZXJ0IjpmYWxzZSwidXBkYXRlIjpmYWxzZSwiZGVsZXRlIjpmYWxzZSwiYXR0cmlidXRlX3Blcm1pc3Npb25zIjpbXX0sImhkYl90ZW1wIjp7InJlYWQiOnRydWUsImluc2VydCI6ZmFsc2UsInVwZGF0ZSI6ZmFsc2UsImRlbGV0ZSI6ZmFsc2UsImF0dHJpYnV0ZV9wZXJtaXNzaW9ucyI6W119fX19LCJyb2xlIjoic3VwZXJfdXNlciJ9LCJ1c2VybmFtZSI6InVzZXJuYW1lIn0sImlhdCI6MTYwNDk3ODcxMywiZXhwIjoxNjA1MDY1MTEzLCJzdWIiOiJvcGVyYXRpb24ifQ.qB4FS7fzryCO5epQlFCQe4mQcUEhzXjfsXRFPgauXrGZwSeSr2o2a1tE1xjiI3qjK0r3f2bdi2xpFlDR1thdY-m0mOpHTICNOae4KdKzp7cyzRaOFurQnVYmkWjuV_Ww4PJgr6P3XDgXs5_B2d7ZVBR-BaAimYhVRIIShfpWk-4iN1XDk96TwloCkYx01BuN87o-VOvAnOG-K_EISA9RuEBpSkfUEuvHx8IU4VgfywdbhNMh6WXM0VP7ZzSpshgsS07MGjysGtZHNTVExEvFh14lyfjfqKjDoIJbo2msQwD2FvrTTb0iaQry1-Wwz9QJjVAUtid7tJuP8aBeNqvKyMIXRVnl5viFUr-Gs-Zl_WtyVvKlYWw0_rUn3ucmurK8tTy6iHyJ6XdUf4pYQebpEkIvi2rd__e_Z60V84MPvIYs6F_8CAy78aaYmUg5pihUEehIvGRj1RUZgdfaXElw90-m-M5hMOTI04LrzzVnBu7DcMYg4UC1W-WDrrj4zUq7y8_LczDA-yBC2-bkvWwLVtHLgV5yIEuIx2zAN74RQ4eCy1ffWDrVxYJBau4yiIyCc68dsatwHHH6bMK0uI9ib6Y9lsxCYjh-7MFcbP-4UBhgoDDXN9xoUToDLRqR9FTHqAHrGHp7BCdF5d6TQTVL5fmmg61MrLucOo-LZBXs1NY"
+}
+```
+
+The `refresh_token` also expires at a set interval, but a longer one. Once it expires, it will no longer be accepted by Harper. This duration defaults to thirty days and is configurable in [harperdb-config.yaml](../../deployments/configuration). To generate a new `operation_token` and a new `refresh_token`, the `create_authentication_tokens` operation is called.
+
+## Configuration
+
+Token timeouts are configurable in [harperdb-config.yaml](../../deployments/configuration) with the following parameters:
+
+- `operationsApi.authentication.operationTokenTimeout`: Defines the length of time until the operation_token expires (default 1d).
+- `operationsApi.authentication.refreshTokenTimeout`: Defines the length of time until the refresh_token expires (default 30d).
+
+A full list of valid values for both parameters can be found [here](https://github.com/vercel/ms).
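+
+As a sketch, adjusting both timeouts in `harperdb-config.yaml` might look like the following (the values are illustrative; the nesting follows the parameter paths above):
+
+```yaml
+operationsApi:
+  authentication:
+    operationTokenTimeout: 1d
+    refreshTokenTimeout: 30d
+```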
diff --git a/site/versioned_docs/version-4.6/developers/security/mtls-auth.md b/site/versioned_docs/version-4.6/developers/security/mtls-auth.md
new file mode 100644
index 00000000..375ec927
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/mtls-auth.md
@@ -0,0 +1,7 @@
+---
+title: mTLS Authentication
+---
+
+# mTLS Authentication
+
+Harper supports mTLS authentication for incoming connections. When enabled in the [HTTP config settings](../../deployments/configuration#http), the client certificate is checked against the certificate authority specified with `tls.certificateAuthority`. If the certificate can be properly verified, the connection is authenticated as the user whose id/username is specified, by default, by the `CN` (common name) from the client certificate's `subject`. The [HTTP config settings](../../deployments/configuration#http) allow you to determine whether mTLS is required for all connections or optional.
diff --git a/site/versioned_docs/version-4.6/developers/security/users-and-roles.md b/site/versioned_docs/version-4.6/developers/security/users-and-roles.md
new file mode 100644
index 00000000..80835953
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/security/users-and-roles.md
@@ -0,0 +1,272 @@
+---
+title: Users & Roles
+---
+
+# Users & Roles
+
+Harper utilizes a Role-Based Access Control (RBAC) framework to manage access to Harper instances. A user is assigned a role that determines the user’s permissions to access database resources and run core operations.
+
+## Roles in Harper
+
+Role permissions in Harper are broken into two categories – permissions around database manipulation and permissions around database definition.
+
+**Database Manipulation**: A role defines CRUD (create, read, update, delete) permissions against database resources (i.e. data) in a Harper instance.
+
+1. At the table level, access permissions must be explicitly defined when adding or altering a role – _i.e. Harper will assume CRUD access to be FALSE if not explicitly provided in the permissions JSON passed to the `add_role` and/or `alter_role` API operations._
+1. At the attribute level, permissions for attributes in all tables included in the permissions set will be assigned based on either the specific attribute-level permissions defined in the table’s permission set or, if there are no attribute-level permissions defined, on the table’s CRUD set.
+
+**Database Definition**: Permissions related to managing databases, tables, roles, users, and other system settings and operations are restricted to the built-in `super_user` role.
+
+**Built-In Roles**
+
+There are three built-in roles within Harper. See the full breakdown of operations restricted to super_user roles [here](./users-and-roles#role-based-operation-restrictions).
+
+- `super_user` - This role provides full access to all operations and methods within a Harper instance; it can be considered the admin role.
+  - This role provides full access to all Database Definition operations and the ability to run Database Manipulation operations across the entire database schema with no restrictions.
+- `cluster_user` - This role is an internal system role type that is managed internally to allow clustered instances to communicate with one another.
+- `structure_user` - This role provides specific access for the creation and deletion of databases and tables (see the sketch after this list).
+  - When defining this role type, you can either assign a value of `true`, which allows the role to create and drop databases & tables, or assign a string array of database names, which restricts the role to creating and dropping tables only in the designated databases.
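+
+As a sketch, assigning `structure_user` scoped to specific databases might look like the following `add_role` call (the role name is hypothetical, and the placement of the `structure_user` key is an assumption based on the permission JSON format shown later on this page):
+
+```json
+{
+	"operation": "add_role",
+	"role": "table_manager",
+	"permission": {
+		"super_user": false,
+		"structure_user": ["dev", "prod"]
+	}
+}
+```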
+
+**User-Defined Roles**
+
+In addition to built-in roles, admins (i.e. users assigned to the super_user role) can create customized roles for other users to interact with and manipulate the data within explicitly defined tables and attributes.
+
+- Unless the user-defined role is given `super_user` permissions, permissions must be defined explicitly within the request body JSON.
+- Describe operations will return metadata for all databases, tables, and attributes that a user-defined role has CRUD permissions for.
+
+**Role Permissions**
+
+When creating a new, user-defined role in a Harper instance, you must provide a role name and the permissions to assign to that role. _Reminder: only super users can create and manage roles._
+
+- `role`: the name used to easily identify the role assigned to individual users.
+
+  _Roles can be altered/dropped based on the role name used in and returned from a successful `add_role`, `alter_role`, or `list_roles` operation._
+
+- `permissions`: used to explicitly define CRUD access to existing table data.
+
+Example JSON for an `add_role` request:
+
+```json
+{
+	"operation": "add_role",
+	"role": "software_developer",
+	"permission": {
+		"super_user": false,
+		"database_name": {
+			"tables": {
+				"table_name1": {
+					"read": true,
+					"insert": true,
+					"update": true,
+					"delete": false,
+					"attribute_permissions": [
+						{
+							"attribute_name": "attribute1",
+							"read": true,
+							"insert": true,
+							"update": true
+						}
+					]
+				},
+				"table_name2": {
+					"read": true,
+					"insert": true,
+					"update": true,
+					"delete": false,
+					"attribute_permissions": []
+				}
+			}
+		}
+	}
+}
+```
+
+**Setting Role Permissions**
+
+There are two parts to a permissions set:
+
+- `super_user` – a boolean value indicating whether the role should be provided super_user access.
+
+  _If `super_user` is set to true, there should be no additional database-specific permissions values included, since the role will have access to the entire database schema. If permissions are included in the body of the operation, they will be stored within Harper but ignored, as super_users have full access to the database._
+
+- `permissions`: database tables that a role should have specific CRUD access to should be included in the final, database-specific `permissions` JSON.
+
+  _For user-defined roles (i.e. non-super_user roles), blank permissions will result in the user being restricted from accessing any of the database schema._
+
+**Table Permissions JSON**
+
+Each table that a role should be given some level of CRUD permissions to must be included in the `tables` array for its database in the role's permissions JSON passed to the API (_see example above_).
+
+```json
+{
+	"table_name": { // the name of the table to define CRUD perms for
+		"read": boolean, // access to read from this table
+		"insert": boolean, // access to insert data to table
+		"update": boolean, // access to update data in table
+		"delete": boolean, // access to delete row data in table
+		"attribute_permissions": [ // permissions for specific table attributes
+			{
+				"attribute_name": "attribute_name", // attribute to assign permissions to
+				"read": boolean, // access to read this attribute from table
+				"insert": boolean, // access to insert this attribute into the table
+				"update": boolean // access to update this attribute in the table
+			}
+		]
+	}
+}
+```
+
+**Important Notes About Table Permissions**
+
+1. If a database and/or any of its tables are not included in the permissions JSON, the role will not have any CRUD access to that database and/or those tables.
+1. If a table-level CRUD permission is set to false, any attribute-level permission with that same CRUD permission set to true will return an error.
+
+**Important Notes About Attribute Permissions**
+
+1. If there are attribute-specific CRUD permissions that need to be enforced on a table, they need to be explicitly described in the `attribute_permissions` array.
+1. If a non-hash attribute is given some level of CRUD access, that same access will be assigned to the table’s `hash_attribute` (also referred to as the `primary_key`), even if it is not explicitly defined in the permissions JSON.
+
+   _See table_name1’s permission set for an example of this – even though the table’s hash attribute is not specifically defined in the attribute_permissions array, because the role has CRUD access to ‘attribute1’, the role will have the same access to the table’s hash attribute._
+
+1. If attribute-level permissions are set – _i.e. attribute_permissions.length > 0_ – any table attribute not explicitly included will be assumed to have no CRUD access (with the exception of the `hash_attribute` described in #2).
+
+   _See table_name1’s permission set for an example of this – in this scenario, the role will have the ability to create, insert, and update ‘attribute1’ and the table’s hash attribute, but no other attributes on that table._
+
+1. If an `attribute_permissions` array is empty, the role’s access to a table’s attributes will be based on the table-level CRUD permissions.
+
+   _See table_name2’s permission set for an example of this._
+
+1. The `__createdtime__` and `__updatedtime__` attributes that Harper manages internally can have read permissions set, but, if set, all other attribute-level permissions for them will be ignored.
+1. Please note that DELETE permissions are not included as part of an individual attribute-level permission set. This is because it is not possible to delete individual attributes from a row; rows must be deleted in full.
+   - If a role needs the ability to delete rows from a table, that permission should be set at the table level.
+   - The practical approach to deleting an individual attribute of a row would be to set that attribute to null via an update statement.
+
+## Role-Based Operation Restrictions
+
+The table below includes all API operations available in Harper and indicates whether or not the operation is restricted to super_user roles.
+ +_Keep in mind that non-super_user roles will also be restricted within the operations they do have access to by the database-level CRUD permissions set for the roles._ + +| Databases and Tables | Restricted to Super_Users | +| -------------------- | :-----------------------: | +| describe_all | | +| describe_database | | +| describe_table | | +| create_database | X | +| drop_database | X | +| create_table | X | +| drop_table | X | +| create_attribute | | +| drop_attribute | X | + +| NoSQL Operations | Restricted to Super_Users | +| -------------------- | :-----------------------: | +| insert | | +| update | | +| upsert | | +| delete | | +| search_by_hash | | +| search_by_value | | +| search_by_conditions | | + +| SQL Operations | Restricted to Super_Users | +| -------------- | :-----------------------: | +| select | | +| insert | | +| update | | +| delete | | + +| Bulk Operations | Restricted to Super_Users | +| --------------- | :-----------------------: | +| csv_data_load | | +| csv_file_load | | +| csv_url_load | | +| import_from_s3 | | + +| Users and Roles | Restricted to Super_Users | +| --------------- | :-----------------------: | +| list_roles | X | +| add_role | X | +| alter_role | X | +| drop_role | X | +| list_users | X | +| user_info | | +| add_user | X | +| alter_user | X | +| drop_user | X | + +| Clustering | Restricted to Super_Users | +| --------------------- | :-----------------------: | +| cluster_set_routes | X | +| cluster_get_routes | X | +| cluster_delete_routes | X | +| add_node | X | +| update_node | X | +| cluster_status | X | +| remove_node | X | +| configure_cluster | X | + +| Components | Restricted to Super_Users | +| ------------------ | :-----------------------: | +| get_components | X | +| get_component_file | X | +| set_component_file | X | +| drop_component | X | +| add_component | X | +| package_component | X | +| deploy_component | X | + +| Custom Functions | Restricted to Super_Users | +| ------------------------------- | :-----------------------: | +| custom_functions_status | X | +| get_custom_functions | X | +| get_custom_function | X | +| set_custom_function | X | +| drop_custom_function | X | +| add_custom_function_project | X | +| drop_custom_function_project | X | +| package_custom_function_project | X | +| deploy_custom_function_project | X | + +| Registration | Restricted to Super_Users | +| ----------------- | :-----------------------: | +| registration_info | | +| get_fingerprint | X | +| set_license | X | + +| Jobs | Restricted to Super_Users | +| ------------------------- | :-----------------------: | +| get_job | | +| search_jobs_by_start_date | X | + +| Logs | Restricted to Super_Users | +| ------------------------------ | :-----------------------: | +| read_log | X | +| read_transaction_log | X | +| delete_transaction_logs_before | X | +| read_audit_log | X | +| delete_audit_logs_before | X | + +| Utilities | Restricted to Super_Users | +| --------------------- | :-----------------------: | +| delete_records_before | X | +| export_local | X | +| export_to_s3 | X | +| system_information | X | +| restart | X | +| restart_service | X | +| get_configuration | X | +| configure_cluster | X | + +| Token Authentication | Restricted to Super_Users | +| ---------------------------- | :-----------------------: | +| create_authentication_tokens | | +| refresh_operation_token | | + +## Error: Must execute as User + +**You may have gotten an error like,** `Error: Must execute as <>`. + +This means that you installed Harper as `<>`. 
Because Harper stores files natively on the operating system, we only allow the Harper executable to be run by a single user. This prevents permissions issues on files.
+
+For example, if you installed as user_a but later wanted to run as user_b, user_b may not have access to the hdb files Harper needs. This also keeps Harper more secure, as it allows you to lock files down to a specific user and prevents other users from accessing your files.
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/date-functions.md b/site/versioned_docs/version-4.6/developers/sql-guide/date-functions.md
new file mode 100644
index 00000000..0133e089
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/date-functions.md
@@ -0,0 +1,227 @@
+---
+title: SQL Date Functions
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL Date Functions
+
+Harper utilizes [Coordinated Universal Time (UTC)](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in all internal SQL operations. This means that date values passed into any of the functions below will be assumed to be in UTC or in a format that can be translated to UTC.
+
+When parsing date values passed to SQL date functions in HDB, we first check for [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formats, then for the [RFC 2822](https://tools.ietf.org/html/rfc2822#section-3.3) date-time format, and then fall back to `new Date(date_string)` if a known format is not found.
+
+### CURRENT_DATE()
+
+Returns the current date in UTC in `YYYY-MM-DD` String format.
+
+```
+"SELECT CURRENT_DATE() AS current_date_result" returns
+	{
+		"current_date_result": "2020-04-22"
+	}
+```
+
+### CURRENT_TIME()
+
+Returns the current time in UTC in `HH:mm:ss.SSS` String format.
+
+```
+"SELECT CURRENT_TIME() AS current_time_result" returns
+	{
+		"current_time_result": "15:18:14.639"
+	}
+```
+
+### CURRENT_TIMESTAMP
+
+Referencing this variable will evaluate as the current Unix Timestamp in milliseconds.
+
+```
+"SELECT CURRENT_TIMESTAMP AS current_timestamp_result" returns
+	{
+		"current_timestamp_result": 1587568845765
+	}
+```
+
+### DATE([date_string])
+
+Formats and returns the date_string argument in UTC in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+If a date_string is not provided, the function will return the current UTC date/time value in the return format defined above.
+
+```
+"SELECT DATE(1587568845765) AS date_result" returns
+	{
+		"date_result": "2020-04-22T15:20:45.765+0000"
+	}
+```
+
+```
+"SELECT DATE(CURRENT_TIMESTAMP) AS date_result2" returns
+	{
+		"date_result2": "2020-04-22T15:20:45.765+0000"
+	}
+```
+
+### DATE_ADD(date, value, interval)
+
+Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+| ------------ | --------- |
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_ADD(1587568845765, 1, 'days') AS date_add_result" AND
+"SELECT DATE_ADD(1587568845765, 1, 'd') AS date_add_result" both return
+	{
+		"date_add_result": 1587655245765
+	}
+```
+
+```
+"SELECT DATE_ADD(CURRENT_TIMESTAMP, 2, 'years')
+AS date_add_result2" returns
+	{
+		"date_add_result2": 1650643129017
+	}
+```
+
+### DATE_DIFF(date_1, date_2[, interval])
+
+Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds.
+
+Accepted interval values:
+
+- years
+- months
+- weeks
+- days
+- hours
+- minutes
+- seconds
+
+```
+"SELECT DATE_DIFF(CURRENT_TIMESTAMP, 1650643129017, 'hours')
+AS date_diff_result" returns
+	{
+		"date_diff_result": -17519.753333333334
+	}
+```
+
+### DATE_FORMAT(date, format)
+
+Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/).
+
+```
+"SELECT DATE_FORMAT(1524412627973, 'YYYY-MM-DD HH:mm:ss')
+AS date_format_result" returns
+	{
+		"date_format_result": "2018-04-22 15:57:07"
+	}
+```
+
+### DATE_SUB(date, value, interval)
+
+Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Accepted interval values are listed below; either string value (key or shorthand) can be passed as the interval argument.
+
+| Key | Shorthand |
+| ------------ | --------- |
+| years | y |
+| quarters | Q |
+| months | M |
+| weeks | w |
+| days | d |
+| hours | h |
+| minutes | m |
+| seconds | s |
+| milliseconds | ms |
+
+```
+"SELECT DATE_SUB(1587568845765, 2, 'years') AS date_sub_result" returns
+	{
+		"date_sub_result": 1524410445765
+	}
+```
+
+### EXTRACT(date, date_part)
+
+Extracts and returns the requested date_part as a String value. The accepted date_part values below show the value returned for date = “2020-03-26T15:13:02.041+0000”.
+
+| date_part | Example return value\* |
+| ----------- | ---------------------- |
+| year | “2020” |
+| month | “3” |
+| day | “26” |
+| hour | “15” |
+| minute | “13” |
+| second | “2” |
+| millisecond | “41” |
+
+```
+"SELECT EXTRACT(1587568845765, 'year') AS extract_result" returns
+	{
+		"extract_result": "2020"
+	}
+```
+
+### GETDATE()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT GETDATE() AS getdate_result" returns
+	{
+		"getdate_result": 1587568845765
+	}
+```
+
+### GET_SERVER_TIME()
+
+Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format.
+
+```
+"SELECT GET_SERVER_TIME() AS get_server_time_result" returns
+	{
+		"get_server_time_result": "2020-04-22T15:20:45.765+0000"
+	}
+```
+
+### OFFSET_UTC(date, offset)
+
+Returns the UTC date time value with the provided offset included in the returned String value, formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours.
+
+```
+"SELECT OFFSET_UTC(1587568845765, 240) AS offset_utc_result" returns
+	{
+		"offset_utc_result": "2020-04-22T19:20:45.765+0400"
+	}
+```
+
+```
+"SELECT OFFSET_UTC(1587568845765, 10) AS offset_utc_result2" returns
+	{
+		"offset_utc_result2": "2020-04-23T01:20:45.765+1000"
+	}
+```
+
+### NOW()
+
+Returns the current Unix Timestamp in milliseconds.
+
+```
+"SELECT NOW() AS now_result" returns
+	{
+		"now_result": 1587568845765
+	}
+```
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/features-matrix.md b/site/versioned_docs/version-4.6/developers/sql-guide/features-matrix.md
new file mode 100644
index 00000000..f436ad62
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/features-matrix.md
@@ -0,0 +1,88 @@
+---
+title: SQL Features Matrix
+---
+
+# SQL Features Matrix
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+## SQL Features Matrix
+
+Harper provides access to most SQL functions, and we’re always expanding that list. Check below to see if we cover what you need.
+
+| INSERT | |
+| ---------------------------------- | --- |
+| Values - multiple values supported | ✔ |
+| Sub-SELECT | ✗ |
+
+| UPDATE | |
+| ---------------- | --- |
+| SET | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
+
+| DELETE | |
+| ---------- | --- |
+| FROM | ✔ |
+| Sub-SELECT | ✗ |
+| Conditions | ✔ |
+
+| SELECT | |
+| -------------------- | --- |
+| Column SELECT | ✔ |
+| Aliases | ✔ |
+| Aggregator Functions | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
+| Constant Values | ✔ |
+| Distinct | ✔ |
+| Sub-SELECT | ✗ |
+
+| FROM | |
+| ---------------- | --- |
+| Multi-table JOIN | ✔ |
+| INNER JOIN | ✔ |
+| LEFT OUTER JOIN | ✔ |
+| LEFT INNER JOIN | ✔ |
+| RIGHT OUTER JOIN | ✔ |
+| RIGHT INNER JOIN | ✔ |
+| FULL JOIN | ✔ |
+| UNION | ✗ |
+| Sub-SELECT | ✗ |
+| TOP | ✔ |
+
+| WHERE | |
+| -------------------------- | --- |
+| Multi-Conditions | ✔ |
+| Wildcards | ✔ |
+| IN | ✔ |
+| LIKE | ✔ |
+| Bit-wise Operators AND, OR | ✔ |
+| Bit-wise Operators NOT | ✔ |
+| NULL | ✔ |
+| BETWEEN | ✔ |
+| EXISTS,ANY,ALL | ✔ |
+| Compare columns | ✔ |
+| Compare constants | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
+| Sub-SELECT | ✗ |
+
+| GROUP BY | |
+| --------------------- | --- |
+| Multi-Column GROUP BY | ✔ |
+
+| HAVING | |
+| ----------------------------- | --- |
+| Aggregate function conditions | ✔ |
+
+| ORDER BY | |
+| --------------------- | --- |
+| Multi-Column ORDER BY | ✔ |
+| Aliases | ✔ |
+| Date Functions\* | ✔ |
+| Math Functions | ✔ |
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/functions.md b/site/versioned_docs/version-4.6/developers/sql-guide/functions.md
new file mode 100644
index 00000000..bf5fd219
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/functions.md
@@ -0,0 +1,159 @@
+---
+title: Harper SQL Functions
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# Harper SQL Functions
+
+This SQL keywords reference contains the SQL functions available in Harper.
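+
+For reference, these functions are used inside standard SQL statements submitted to Harper. A minimal sketch of running one through the Operations API `sql` operation (the instance URL and Basic auth credentials here are hypothetical):
+
+```bash
+curl --location --request POST 'http://localhost:9925' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Basic YWRtaW46cGFzc3dvcmQ=' \
+--data-raw '{
+	"operation": "sql",
+	"sql": "SELECT CURRENT_DATE() AS current_date_result"
+}'
+```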
+
+## Functions
+
+### Aggregate
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| AVG | AVG(_expression_) | Returns the average of a given numeric expression. |
+| COUNT | SELECT COUNT(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the number of records that match the given criteria. Nulls are not counted. |
+| GROUP_CONCAT | GROUP_CONCAT(_expression_) | Returns a string of concatenated, comma-separated, non-null values from a group. Will return null when there are no non-null values. |
+| MAX | SELECT MAX(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the largest value in a specified column. |
+| MIN | SELECT MIN(_column_name_) FROM _database.table_ WHERE _condition_ | Returns the smallest value in a specified column. |
+| SUM | SUM(_column_name_) | Returns the sum of the numeric values provided. |
+| ARRAY\* | ARRAY(_expression_) | Returns a list of data as a field. |
+| DISTINCT_ARRAY\* | DISTINCT_ARRAY(_expression_) | When placed around a standard ARRAY() function, returns a distinct (deduplicated) result set. |
+
+\*For more information on ARRAY() and DISTINCT_ARRAY() see [this blog](https://www.harperdb.io/post/sql-queries-to-complex-objects).
+
+### Conversion
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| CAST | CAST(_expression AS datatype(length)_) | Converts a value to a specified datatype. |
+| CONVERT | CONVERT(_data_type(length), expression, style_) | Converts a value from one datatype to a different, specified datatype. |
+
+### Date & Time
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| CURRENT_DATE | CURRENT_DATE() | Returns the current date in UTC in “YYYY-MM-DD” String format. |
+| CURRENT_TIME | CURRENT_TIME() | Returns the current time in UTC in “HH:mm:ss.SSS” String format. |
+| CURRENT_TIMESTAMP | CURRENT_TIMESTAMP | Referencing this variable will evaluate as the current Unix Timestamp in milliseconds. |
+| DATE | DATE([_date_string_]) | Formats and returns the date_string argument in UTC in ‘YYYY-MM-DDTHH:mm:ss.SSSZZ’ String format. If a date_string is not provided, the function will return the current UTC date/time value. |
+| DATE_ADD | DATE_ADD(_date, value, interval_) | Adds the defined amount of time to the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. |
+| DATE_DIFF | DATE_DIFF(_date_1, date_2[, interval]_) | Returns the difference between the two date values passed, based on the interval, as a Number. If an interval is not provided, the function will return the difference value in milliseconds. |
+| DATE_FORMAT | DATE_FORMAT(_date, format_) | Formats and returns a date value in the String format provided. Find more details on accepted format values in the [moment.js docs](https://momentjs.com/docs/#/displaying/format/). |
+| DATE_SUB | DATE_SUB(_date, value, interval_) | Subtracts the defined amount of time from the date provided in UTC and returns the resulting Unix Timestamp in milliseconds. Either string value (key or shorthand) can be passed as the interval argument. |
+| DAY | DAY(_date_) | Returns the day of the month for the given date. |
+| DAYOFWEEK | DAYOFWEEK(_date_) | Returns the numeric value of the weekday of the given date (“YYYY-MM-DD”). NOTE: 0=Sunday, 1=Monday, 2=Tuesday, 3=Wednesday, 4=Thursday, 5=Friday, and 6=Saturday. |
+| EXTRACT | EXTRACT(_date, date_part_) | Extracts and returns the requested date_part as a String value. |
+| GETDATE | GETDATE() | Returns the current Unix Timestamp in milliseconds. |
+| GET_SERVER_TIME | GET_SERVER_TIME() | Returns the current date/time value based on the server’s timezone in `YYYY-MM-DDTHH:mm:ss.SSSZZ` String format. |
+| OFFSET_UTC | OFFSET_UTC(_date, offset_) | Returns the UTC date time value with the provided offset included, formatted as `YYYY-MM-DDTHH:mm:ss.SSSZZ`. The offset argument will be added as minutes unless the value is less than 16 and greater than -16, in which case it will be treated as hours. |
+| NOW | NOW() | Returns the current Unix Timestamp in milliseconds. |
+| HOUR | HOUR(_datetime_) | Returns the hour part of a given date in the range of 0 to 838. |
+| MINUTE | MINUTE(_datetime_) | Returns the minute part of a time/datetime in the range of 0 to 59. |
+| MONTH | MONTH(_date_) | Returns the month part for a specified date in the range of 1 to 12. |
+| SECOND | SECOND(_datetime_) | Returns the seconds part of a time/datetime in the range of 0 to 59. |
+| YEAR | YEAR(_date_) | Returns the year part for a specified date. |
+
+### Logical
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| IF | IF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IIF | IIF(_condition, value_if_true, value_if_false_) | Returns a value if the condition is true, or another value if the condition is false. |
+| IFNULL | IFNULL(_expression, alt_value_) | Returns a specified value if the expression is null. |
+| NULLIF | NULLIF(_expression_1, expression_2_) | Returns null if expression_1 is equal to expression_2; if not equal, returns expression_1. |
+
+### Mathematical
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| ABS | ABS(_expression_) | Returns the absolute value of a given numeric expression. |
+| CEIL | CEIL(_number_) | Returns the integer ceiling, the smallest integer value that is bigger than or equal to a given number. |
+| EXP | EXP(_number_) | Returns e to the power of a specified number. |
+| FLOOR | FLOOR(_number_) | Returns the largest integer value that is smaller than, or equal to, a given number. |
+| RANDOM | RANDOM(_seed_) | Returns a pseudo random number. |
+| ROUND | ROUND(_number, decimal_places_) | Rounds a given number to a specified number of decimal places. |
+| SQRT | SQRT(_expression_) | Returns the square root of an expression. |
+
+### String
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| CONCAT | CONCAT(_string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together, resulting in a single string. |
+| CONCAT_WS | CONCAT_WS(_separator, string_1, string_2, ...., string_n_) | Concatenates, or joins, two or more strings together with a separator, resulting in a single string. |
+| INSTR | INSTR(_string_1, string_2_) | Returns the first position, as an integer, of string_2 within string_1. |
+| LEN | LEN(_string_) | Returns the length of a string. |
+| LOWER | LOWER(_string_) | Converts a string to lower-case. |
+| REGEXP | SELECT _column_name_ FROM _database.table_ WHERE _column_name_ REGEXP _pattern_ | Searches the column for strings matching a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REGEXP_LIKE | SELECT _column_name_ FROM _database.table_ WHERE REGEXP_LIKE(_column_name, pattern_) | Searches the column for strings matching a given regular expression pattern, provided as a string, and returns all matches. If no matches are found, it returns null. |
+| REPLACE | REPLACE(_string, old_string, new_string_) | Replaces all instances of old_string within string with new_string. |
+| SUBSTRING | SUBSTRING(_string, string_position, length_of_substring_) | Extracts a specified number of characters from a string. |
+| TRIM | TRIM([_character(s) FROM_] _string_) | Removes leading and trailing spaces, or specified character(s), from a string. |
+| UPPER | UPPER(_string_) | Converts a string to upper-case. |
+
+## Operators
+
+### Logical Operators
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| BETWEEN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ BETWEEN _value_1_ AND _value_2_ | (inclusive) Returns values (numbers, text, or dates) within a given range. |
+| IN | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IN(_value(s)_) | Used to specify multiple values in a WHERE clause. |
+| LIKE | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ LIKE _pattern_ | Searches for a specified pattern within a WHERE clause. |
+
+## Queries
+
+### General
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| DISTINCT | SELECT DISTINCT _column_name(s)_ FROM _database.table_ | Returns only unique values, eliminating duplicate records. |
+| FROM | FROM _database.table_ | Used to list the database(s), table(s), and any joins required for a SQL statement. |
+| GROUP BY | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ ORDER BY _column_name(s)_ | Groups rows that have the same values into summary rows. |
+| HAVING | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ GROUP BY _column_name(s)_ HAVING _condition_ ORDER BY _column_name(s)_ | Filters data based on a group or aggregate function. |
+| SELECT | SELECT _column_name(s)_ FROM _database.table_ | Selects data from a table. |
+| WHERE | SELECT _column_name(s)_ FROM _database.table_ WHERE _condition_ | Extracts records based on a defined condition. |
+
+### Joins
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| CROSS JOIN | SELECT _column_name(s)_ FROM _database.table_1_ CROSS JOIN _database.table_2_ | Returns a paired combination of each row from _table_1_ with each row from _table_2_. _Note: CROSS JOIN can return very large result sets and is generally considered bad practice._ |
+| FULL OUTER | SELECT _column_name(s)_ FROM _database.table_1_ FULL OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ WHERE _condition_ | Returns all records when there is a match in either _table_1_ (left table) or _table_2_ (right table). |
+| [INNER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ INNER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns only matching records from _table_1_ (left table) and _table_2_ (right table). The INNER keyword is optional and does not affect the result. |
+| LEFT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ LEFT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_1_ (left table) and matching data from _table_2_ (right table). The OUTER keyword is optional and does not affect the result. |
+| RIGHT [OUTER] JOIN | SELECT _column_name(s)_ FROM _database.table_1_ RIGHT OUTER JOIN _database.table_2_ ON _table_1.column_name = table_2.column_name_ | Returns all records from _table_2_ (right table) and matching data from _table_1_ (left table). The OUTER keyword is optional and does not affect the result. |
+
+### Predicates
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| IS NOT NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NOT NULL | Tests for non-null values. |
+| IS NULL | SELECT _column_name(s)_ FROM _database.table_ WHERE _column_name_ IS NULL | Tests for null values. |
+
+### Statements
+
+| Keyword | Syntax | Description |
+| --- | --- | --- |
+| DELETE | DELETE FROM _database.table_ WHERE condition | Deletes existing data from a table. |
+| INSERT | INSERT INTO _database.table(column_name(s))_ VALUES(_value(s)_) | Inserts new records into a table. |
+| UPDATE | UPDATE _database.table_ SET _column_1 = value_1, column_2 = value_2, ....,_ WHERE _condition_ | Alters existing records in a table. |
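+
+As a usage sketch, several of these functions and keywords can be combined in a single statement (the `dev.dog` table and its columns follow the examples used elsewhere in this guide):
+
+```
+SELECT id, UPPER(dog_name) AS name, IF(age >= 5, 'senior', 'adult') AS life_stage
+  FROM dev.dog
+  WHERE dog_name LIKE 'P%'
+  ORDER BY name
+```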
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/index.md b/site/versioned_docs/version-4.6/developers/sql-guide/index.md
new file mode 100644
index 00000000..18674950
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/index.md
@@ -0,0 +1,88 @@
+---
+title: SQL Guide
+---
+
+# SQL Guide
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+## Harper SQL Guide
+
+The purpose of this guide is to describe the available functionality of Harper as it relates to supported SQL functionality. The SQL parser is still actively being developed; many SQL features may not be optimized or utilize indexes. This document will be updated as more features and functionality become available. Generally, the REST interface provides a more stable, secure, and performant interface for data interaction, but the SQL functionality can be useful for administrative ad-hoc querying and for utilizing existing SQL statements. **A high-level view of supported features can be found** [**here**](./features-matrix)**.**
+
+Harper adheres to the concept of databases & tables. This allows developers to isolate table structures from each other all within one database.
+
+## Select
+
+Harper has robust SELECT support, from simple queries all the way to complex joins with multi-conditions, aggregates, grouping & ordering.
+
+All results are returned as JSON object arrays.
+
+Query for all records and attributes in the dev.dog table:
+
+```
+SELECT * FROM dev.dog
+```
+
+Query specific columns from all rows in the dev.dog table:
+
+```
+SELECT id, dog_name, age FROM dev.dog
+```
+
+Query for all records and attributes in the dev.dog table ORDERED BY age in ASC order:
+
+```
+SELECT * FROM dev.dog ORDER BY age
+```
+
+_The ORDER BY keyword sorts in ascending order by default. To sort in descending order, use the DESC keyword._
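+
+For example, to return the same records ordered oldest to youngest:
+
+```
+SELECT * FROM dev.dog ORDER BY age DESC
+```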
+
+## Insert
+
+Harper supports inserting 1 to n records into a table. The primary key must be unique (not used by any other record). If no primary key is provided, it will be assigned an auto-generated UUID. Harper does not support selecting from one table to insert into another at this time.
+
+```
+INSERT INTO dev.dog (id, dog_name, age, breed_id)
+  VALUES(1, 'Penny', 5, 347), (2, 'Kato', 4, 347)
+```
+
+## Update
+
+Harper supports updating existing table row(s) via UPDATE statements. Multiple conditions can be applied to filter the row(s) to update. At this time, selecting from one table to update another is not supported.
+
+```
+UPDATE dev.dog
+  SET owner_name = 'Kyle'
+  WHERE id IN (1, 2)
+```
+
+## Delete
+
+Harper supports deleting records from a table with condition support.
+
+```
+DELETE FROM dev.dog
+  WHERE age < 4
+```
+
+## Joins
+
+Harper allows developers to join any number of tables and currently supports the following join types:
+
+- INNER JOIN
+- LEFT INNER JOIN
+- LEFT OUTER JOIN
+
+Here’s a basic example joining two tables from our Get Started example, joining a dogs table with a breeds table:
+
+```
+SELECT d.id, d.dog_name, d.owner_name, b.name, b.section
+  FROM dev.dog AS d
+  INNER JOIN dev.breed AS b ON d.breed_id = b.id
+  WHERE d.owner_name IN ('Kyle', 'Zach', 'Stephen')
+  AND b.section = 'Mutt'
+  ORDER BY d.dog_name
+```
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/json-search.md b/site/versioned_docs/version-4.6/developers/sql-guide/json-search.md
new file mode 100644
index 00000000..b078baa7
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/json-search.md
@@ -0,0 +1,177 @@
+---
+title: SQL JSON Search
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL JSON Search
+
+Harper automatically indexes all top level attributes in a row / object written to a table. However, any attributes which hold JSON data do not have their nested attributes indexed. In order to make searching and/or transforming these JSON documents easy, Harper offers a special SQL function called SEARCH_JSON. The SEARCH_JSON function works in SELECT & WHERE clauses, allowing queries to perform powerful filtering on any element of your JSON by implementing the [JSONata library](https://docs.jsonata.org/overview.html) into our SQL engine.
+
+## Syntax
+
+SEARCH_JSON(_expression, attribute_)
+
+Executes the supplied string _expression_ against data of the defined top level _attribute_ for each row. The expression both filters and defines output from the JSON document.
+
+### Example 1
+
+#### Search a string array
+
+Here are two records in the database:
+
+```json
+[
+  {
+    "id": 1,
+    "name": ["Harper", "Penny"]
+  },
+  {
+    "id": 2,
+    "name": ["Penny"]
+  }
+]
+```
+
+Here is a simple query that gets any record with "Harper" found in the name.
+
+```
+SELECT *
+FROM dev.dog
+WHERE search_json('"Harper" in *', name)
+```
+
+### Example 2
+
+The purpose of this query is to give us every movie where at least two of our favorite actors from Marvel films have acted together. The results will return the movie title, the overview, the release date, and an object array of each actor’s name and their character name in the movie.
+
+Both function calls evaluate the credits.cast attribute; this attribute is an object array of every cast member in a movie.
+
+```
+SELECT m.title,
+  m.overview,
+  m.release_date,
+  SEARCH_JSON($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]].{"actor": name, "character": character}, c.`cast`) AS characters
+FROM movies.credits c
+  INNER JOIN movies.movie m
+  ON c.movie_id = m.id
+WHERE SEARCH_JSON($count($[name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2
+```
Jackson", "Gwyneth Paltrow", "Don Cheadle"]]), c.`cast`) >= 2 +``` + +A sample of this data from the movie The Avengers looks like + +```json +[ + { + "cast_id": 46, + "character": "Tony Stark / Iron Man", + "credit_id": "52fe4495c3a368484e02b251", + "gender": "male", + "id": 3223, + "name": "Robert Downey Jr.", + "order": 0 + }, + { + "cast_id": 2, + "character": "Steve Rogers / Captain America", + "credit_id": "52fe4495c3a368484e02b19b", + "gender": "male", + "id": 16828, + "name": "Chris Evans", + "order": 1 + }, + { + "cast_id": 307, + "character": "Bruce Banner / The Hulk", + "credit_id": "5e85e8083344c60015411cfa", + "gender": "male", + "id": 103, + "name": "Mark Ruffalo", + "order": 2 + } +] +``` + +Let’s break down the SEARCH_JSON function call in the SELECT: + +``` +SEARCH_JSON( + $[name in [ + "Robert Downey Jr.", + "Chris Evans", + "Scarlett Johansson", + "Mark Ruffalo", + "Chris Hemsworth", + "Jeremy Renner", + "Clark Gregg", + "Samuel L. Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]].{ + "actor": name, + "character": character + }, + c.`cast` +) +``` + +The first argument passed to SEARCH_JSON is the expression to execute against the second argument which is the cast attribute on the credits table. This expression will execute for every row. Looking into the expression it starts with “$\[…]” this tells the expression to iterate all elements of the cast array. + +Then the expression tells the function to only return entries where the name attribute matches any of the actors defined in the array: + +``` +name in ["Robert Downey Jr.", "Chris Evans", "Scarlett Johansson", "Mark Ruffalo", "Chris Hemsworth", "Jeremy Renner", "Clark Gregg", "Samuel L. Jackson", "Gwyneth Paltrow", "Don Cheadle"] +``` + +So far, we’ve iterated the array and filtered out rows, but we also want the results formatted in a specific way, so we’ve chained an expression on our filter with: `{“actor”: name, “character”: character}`. This tells the function to create a specific object for each matching entry. + +**Sample Result** + +```json +[ + { + "actor": "Robert Downey Jr.", + "character": "Tony Stark / Iron Man" + }, + { + "actor": "Chris Evans", + "character": "Steve Rogers / Captain America" + }, + { + "actor": "Mark Ruffalo", + "character": "Bruce Banner / The Hulk" + } +] +``` + +Just having the SEARCH_JSON function in our SELECT is powerful, but given our criteria it would still return every other movie that doesn’t have our matching actors, in order to filter out the movies we do not want we also use SEARCH_JSON in the WHERE clause. + +This function call in the WHERE clause is similar, but we don’t need to perform the same transformation as occurred in the SELECT: + +``` +SEARCH_JSON( + $count( + $[name in [ + "Robert Downey Jr.", + "Chris Evans", + "Scarlett Johansson", + "Mark Ruffalo", + "Chris Hemsworth", + "Jeremy Renner", + "Clark Gregg", + "Samuel L. Jackson", + "Gwyneth Paltrow", + "Don Cheadle" + ]] + ), + c.`cast` +) >= 2 +``` + +As seen above we execute the same name filter against the cast array, the primary difference is we are wrapping the filtered results in $count(…). As it looks this returns a count of the results back which we then use against our SQL comparator of >= 2. + +To see further SEARCH_JSON examples in action view our Postman Collection that provides a [sample database & data with query examples](../operations-api/advanced-json-sql-examples). 
+
+To learn more about how to build expressions, check out the JSONata documentation: [https://docs.jsonata.org/overview](https://docs.jsonata.org/overview)
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/reserved-word.md b/site/versioned_docs/version-4.6/developers/sql-guide/reserved-word.md
new file mode 100644
index 00000000..2cd812ba
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/reserved-word.md
@@ -0,0 +1,207 @@
+---
+title: Harper SQL Reserved Words
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# Harper SQL Reserved Words
+
+This is a list of reserved words in the SQL Parser. Use of these words or symbols may result in unexpected behavior or inaccessible tables/attributes. If any of these words must be used, any SQL call referencing a database, table, or attribute must have backticks (`…`) or brackets ([…]) around the variable.
+
+For example, for a table called `ASSERT` in the `data` database, a SQL select on that table would look like:
+
+```
+SELECT * from data.`ASSERT`
+```
+
+Alternatively:
+
+```
+SELECT * from data.[ASSERT]
+```
+
+### RESERVED WORD LIST
+
+- ABSOLUTE
+- ACTION
+- ADD
+- AGGR
+- ALL
+- ALTER
+- AND
+- ANTI
+- ANY
+- APPLY
+- ARRAY
+- AS
+- ASSERT
+- ASC
+- ATTACH
+- AUTOINCREMENT
+- AUTO_INCREMENT
+- AVG
+- BEGIN
+- BETWEEN
+- BREAK
+- BY
+- CALL
+- CASE
+- CAST
+- CHECK
+- CLASS
+- CLOSE
+- COLLATE
+- COLUMN
+- COLUMNS
+- COMMIT
+- CONSTRAINT
+- CONTENT
+- CONTINUE
+- CONVERT
+- CORRESPONDING
+- COUNT
+- CREATE
+- CROSS
+- CUBE
+- CURRENT_TIMESTAMP
+- CURSOR
+- DATABASE
+- DECLARE
+- DEFAULT
+- DELETE
+- DELETED
+- DESC
+- DETACH
+- DISTINCT
+- DOUBLEPRECISION
+- DROP
+- ECHO
+- EDGE
+- END
+- ENUM
+- ELSE
+- EXCEPT
+- EXISTS
+- EXPLAIN
+- FALSE
+- FETCH
+- FIRST
+- FOREIGN
+- FROM
+- GO
+- GRAPH
+- GROUP
+- GROUPING
+- HAVING
+- HDB_HASH
+- HELP
+- IF
+- IDENTITY
+- IS
+- IN
+- INDEX
+- INNER
+- INSERT
+- INSERTED
+- INTERSECT
+- INTO
+- JOIN
+- KEY
+- LAST
+- LET
+- LEFT
+- LIKE
+- LIMIT
+- LOOP
+- MATCHED
+- MATRIX
+- MAX
+- MERGE
+- MIN
+- MINUS
+- MODIFY
+- NATURAL
+- NEXT
+- NEW
+- NOCASE
+- NO
+- NOT
+- NULL
+- OFF
+- ON
+- ONLY
+- OFFSET
+- OPEN
+- OPTION
+- OR
+- ORDER
+- OUTER
+- OVER
+- PATH
+- PARTITION
+- PERCENT
+- PLAN
+- PRIMARY
+- PRINT
+- PRIOR
+- QUERY
+- READ
+- RECORDSET
+- REDUCE
+- REFERENCES
+- RELATIVE
+- REPLACE
+- REMOVE
+- RENAME
+- REQUIRE
+- RESTORE
+- RETURN
+- RETURNS
+- RIGHT
+- ROLLBACK
+- ROLLUP
+- ROW
+- SCHEMA
+- SCHEMAS
+- SEARCH
+- SELECT
+- SEMI
+- SET
+- SETS
+- SHOW
+- SOME
+- SOURCE
+- STRATEGY
+- STORE
+- SYSTEM
+- SUM
+- TABLE
+- TABLES
+- TARGET
+- TEMP
+- TEMPORARY
+- TEXTSTRING
+- THEN
+- TIMEOUT
+- TO
+- TOP
+- TRAN
+- TRANSACTION
+- TRIGGER
+- TRUE
+- TRUNCATE
+- UNION
+- UNIQUE
+- UPDATE
+- USE
+- USING
+- VALUE
+- VERTEX
+- VIEW
+- WHEN
+- WHERE
+- WHILE
+- WITH
+- WORK
diff --git a/site/versioned_docs/version-4.6/developers/sql-guide/sql-geospatial-functions.md b/site/versioned_docs/version-4.6/developers/sql-guide/sql-geospatial-functions.md
new file mode 100644
index 00000000..e00986f3
--- /dev/null
+++ b/site/versioned_docs/version-4.6/developers/sql-guide/sql-geospatial-functions.md
@@ -0,0 +1,419 @@
+---
+title: SQL Geospatial Functions
+---
+
+:::warning
+Harper encourages developers to utilize other querying tools over SQL for performance purposes. Harper SQL is intended for data investigation purposes and use cases where performance is not a priority. SQL optimizations are on our roadmap for the future.
+:::
+
+# SQL Geospatial Functions
+
+Harper geospatial features require data to be stored in a single column using the [GeoJSON standard](https://geojson.org/), a standard commonly used in geospatial technologies. Geospatial functions are available to be used in SQL statements.
+
+If you are new to GeoJSON you should check out the full specification here: https://geojson.org/. There are a few important things to point out before getting started.
+
+1. All GeoJSON coordinates are stored in `[longitude, latitude]` format.
+1. Coordinates or GeoJSON geometries must be passed as strings when written directly in a SQL statement.
+1. Note: if you are using Postman for your testing, due to limitations in the Postman client, you will need to escape quotes in your strings and your SQL will need to be passed on a single line.
+
+In the examples that follow, database and table names may change, but all GeoJSON data will be stored in a column named geo_data.
+
+# geoArea
+
+The geoArea() function returns the area of one or more features in square meters.
+
+## Syntax
+
+geoArea(_geoJSON_)
+
+## Parameters
+
+| Parameter | Description |
+| --------- | ------------------------------- |
+| geoJSON | Required. One or more features. |
+
+### Example 1
+
+Calculate the area, in square meters, of a manually passed GeoJSON polygon.
+
+```
+SELECT geoArea('{
+  "type":"Feature",
+  "geometry":{
+    "type":"Polygon",
+    "coordinates":[[
+      [0,0],
+      [0.123456,0],
+      [0.123456,0.123456],
+      [0,0.123456]
+    ]]
+  }
+}')
+```
+
+### Example 2
+
+Find all records that have an area less than 1 square mile (or 2589988 square meters).
+
+```
+SELECT * FROM dev.locations
+WHERE geoArea(geo_data) < 2589988
+```
+
+# geoLength
+
+Takes a GeoJSON and measures its length in the specified units (default is kilometers).
+
+## Syntax
+
+geoLength(_geoJSON_[_, units_])
+
+## Parameters
+
+| Parameter | Description |
+| --------- | ------------------------------------------------------------------------------------------------------------------------ |
+| geoJSON | Required. GeoJSON to measure. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+
+Calculate the length, in kilometers, of a manually passed GeoJSON linestring.
+
+```
+SELECT geoLength('{
+  "type": "Feature",
+  "geometry": {
+    "type": "LineString",
+    "coordinates": [
+      [-104.97963309288025,39.76163265441438],
+      [-104.9823260307312,39.76365323407955],
+      [-104.99193906784058,39.75616442110704]
+    ]
+  }
+}')
+```
+
+### Example 2
+
+Find all data plus the calculated length in miles of the GeoJSON, restrict the response to only lengths less than 5 miles, and return the data in order of lengths smallest to largest.
+
+```
+SELECT *, geoLength(geo_data, 'miles') as length
+FROM dev.locations
+WHERE geoLength(geo_data, 'miles') < 5
+ORDER BY length ASC
+```
+
+# geoDifference
+
+Returns a new polygon with the difference of the second polygon clipped from the first polygon.
+
+## Syntax
+
+geoDifference(_polygon1, polygon2_)
+
+## Parameters
+
+| Parameter | Description |
+| --------- | ---------------------------------------------------------------------------- |
+| polygon1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| polygon2 | Required. Polygon or MultiPolygon GeoJSON feature to remove from polygon1. |
+
+### Example
+
+Return a GeoJSON Polygon that removes City Park (_polygon2_) from Colorado (_polygon1_).
+
+```
+SELECT geoDifference('{
+    "type": "Feature",
+    "properties": {
+      "name":"Colorado"
+    },
+    "geometry": {
+      "type": "Polygon",
+      "coordinates": [[
+        [-109.072265625,37.00255267215955],
+        [-102.01904296874999,37.00255267215955],
+        [-102.01904296874999,41.0130657870063],
+        [-109.072265625,41.0130657870063],
+        [-109.072265625,37.00255267215955]
+      ]]
+    }
+  }',
+  '{
+    "type": "Feature",
+    "properties": {
+      "name":"City Park"
+    },
+    "geometry": {
+      "type": "Polygon",
+      "coordinates": [[
+        [-104.95973110198975,39.7543828214657],
+        [-104.95955944061278,39.744781185675386],
+        [-104.95904445648193,39.74422022399989],
+        [-104.95835781097412,39.74402223643582],
+        [-104.94097709655762,39.74392324244047],
+        [-104.9408483505249,39.75434982844515],
+        [-104.95973110198975,39.7543828214657]
+      ]]
+    }
+  }'
+)
+```
+
+# geoDistance
+
+Calculates the distance between two points in units (default is kilometers).
+
+## Syntax
+
+geoDistance(_point1, point2_[_, units_])
+
+## Parameters
+
+| Parameter | Description |
+| --------- | ------------------------------------------------------------------------------------------------------------------------ |
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+
+Calculate the distance, in miles, between Harper’s headquarters and the Washington Monument.
+
+```
+SELECT geoDistance('[-104.979127,39.761563]', '[-77.035248,38.889475]', 'miles')
+```
+
+### Example 2
+
+Find all locations that are within 40 kilometers of a given point, return that distance in miles, and sort by distance in ascending order.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoDistance('[-104.979127,39.761563]', geo_data, 'kilometers') < 40
+ORDER BY distance ASC
+```
+
+# geoNear
+
+Determines if point1 and point2 are within a specified distance from each other; default units are kilometers. Returns a Boolean.
+
+## Syntax
+
+geoNear(_point1, point2, distance_[_, units_])
+
+## Parameters
+
+| Parameter | Description |
+| --------- | ------------------------------------------------------------------------------------------------------------------------ |
+| point1 | Required. GeoJSON Point specifying the origin. |
+| point2 | Required. GeoJSON Point specifying the destination. |
+| distance | Required. The maximum distance in units as an integer or decimal. |
+| units | Optional. Specified as a string. Options are ‘degrees’, ‘radians’, ‘miles’, or ‘kilometers’. Default is ‘kilometers’. |
+
+### Example 1
+
+Return all locations within 50 miles of a given point.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 50, 'miles')
+```
+
+### Example 2
+
+Return all locations within 2 degrees of the earth of a given point (each degree of lat/long is about 69 miles [111 kilometers]). Return all data and the distance in miles, sorted by ascending distance.
+
+```
+SELECT *, geoDistance('[-104.979127,39.761563]', geo_data, 'miles') as distance
+FROM dev.locations
+WHERE geoNear('[-104.979127,39.761563]', geo_data, 2, 'degrees')
+ORDER BY distance ASC
+```
+
+# geoContains
+
+Determines if geo2 is completely contained by geo1. Returns a Boolean.
+
+## Syntax
+
+geoContains(_geo1, geo2_)
+
+## Parameters
+
+| Parameter | Description |
+| --------- | ------------------------------------------------------------------------------------ |
+| geo1 | Required. Polygon or MultiPolygon GeoJSON feature. |
+| geo2 | Required. Polygon or MultiPolygon GeoJSON feature tested to be contained by geo1. |
+
+### Example 1
+
+Return all locations within the state of Colorado (passed as a GeoJSON string).
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains('{
+  "type": "Feature",
+  "properties": {
+    "name":"Colorado"
+  },
+  "geometry": {
+    "type": "Polygon",
+    "coordinates": [[
+      [-109.072265625,37.00255267],
+      [-102.01904296874999,37.00255267],
+      [-102.01904296874999,41.01306579],
+      [-109.072265625,41.01306579],
+      [-109.072265625,37.00255267]
+    ]]
+  }
+}', geo_data)
+```
+
+### Example 2
+
+Return all locations which contain Harper Headquarters.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoContains(geo_data, '{
+  "type": "Feature",
+  "properties": {
+    "name": "Harper Headquarters"
+  },
+  "geometry": {
+    "type": "Polygon",
+    "coordinates": [[
+      [-104.98060941696167,39.760704817357905],
+      [-104.98053967952728,39.76065120861263],
+      [-104.98055577278137,39.760642961109674],
+      [-104.98037070035934,39.76049450588716],
+      [-104.9802714586258,39.76056254790385],
+      [-104.9805235862732,39.76076461167841],
+      [-104.98060941696167,39.760704817357905]
+    ]]
+  }
+}')
+```
+
+# geoEqual
+
+Determines if two GeoJSON features are the same type and have identical X,Y coordinate values. For more information see https://developers.arcgis.com/documentation/spatial-references/. Returns a Boolean.
+
+## Syntax
+
+geoEqual(_geo1_, _geo2_)
+
+## Parameters
+
+| Parameter | Description |
+| --------- | -------------------------------------- |
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+
+Find Harper Headquarters among all locations in the database.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoEqual(geo_data, '{
+  "type": "Feature",
+  "properties": {
+    "name": "Harper Headquarters"
+  },
+  "geometry": {
+    "type": "Polygon",
+    "coordinates": [[
+      [-104.98060941696167,39.760704817357905],
+      [-104.98053967952728,39.76065120861263],
+      [-104.98055577278137,39.760642961109674],
+      [-104.98037070035934,39.76049450588716],
+      [-104.9802714586258,39.76056254790385],
+      [-104.9805235862732,39.76076461167841],
+      [-104.98060941696167,39.760704817357905]
+    ]]
+  }
+}')
+```
+
+# geoCrosses
+
+Determines if the geometries cross over each other. Returns a Boolean.
+
+## Syntax
+
+geoCrosses(_geo1, geo2_)
+
+## Parameters
+
+| Parameter | Description |
+| --------- | -------------------------------------- |
+| geo1 | Required. GeoJSON geometry or feature. |
+| geo2 | Required. GeoJSON geometry or feature. |
+
+### Example
+
+Find all locations that cross over a highway.
+
+```
+SELECT *
+FROM dev.locations
+WHERE geoCrosses(
+  geo_data,
+  '{
+    "type": "Feature",
+    "properties": {
+      "name": "Highway I-25"
+    },
+    "geometry": {
+      "type": "LineString",
+      "coordinates": [
+        [-104.9139404296875,41.00477542222947],
+        [-105.0238037109375,39.715638134796336],
+        [-104.853515625,39.53370327008705],
+        [-104.853515625,38.81403111409755],
+        [-104.61181640625,38.39764411353178],
+        [-104.8974609375,37.68382032669382],
+        [-104.501953125,37.00255267215955]
+      ]
+    }
+  }'
+)
+```
+
+# geoConvert
+
+Converts a series of coordinates into a GeoJSON of the specified type.
+
+## Syntax
+
+geoConvert(_coordinates, geo_type_[, _properties_])
+
+## Parameters
+
+| Parameter | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------- |
+| coordinates | Required. One or more coordinates. |
+| geo_type | Required. GeoJSON geometry type. Options are ‘point’, ‘lineString’, ‘multiLineString’, ‘multiPoint’, ‘multiPolygon’, and ‘polygon’. |
+| properties | Optional. Escaped JSON array with properties to be added to the GeoJSON output. |
+
+### Example
+
+Convert a given coordinate into a GeoJSON point with specified properties.
+
+```
+SELECT geoConvert(
+  '[-104.979127,39.761563]',
+  'point',
+  '{
+    "name": "Harper Headquarters"
+  }'
+)
+```
diff --git a/site/versioned_docs/version-4.6/getting-started/first-harper-app.md b/site/versioned_docs/version-4.6/getting-started/first-harper-app.md
new file mode 100644
index 00000000..d1f52b47
--- /dev/null
+++ b/site/versioned_docs/version-4.6/getting-started/first-harper-app.md
@@ -0,0 +1,171 @@
+---
+title: Create Your First Application
+---
+
+# Create Your First Application
+
+Now that you've set up Harper, let's build a simple API. Harper lets you build powerful APIs with minimal effort. In just a few minutes, you'll have a functional REST API with automatic validation, indexing, and querying—all without writing a single line of code.
+
+## Set Up Your Project
+
+Start by cloning the Harper application template:
+
+```bash
+git clone https://github.com/HarperDB/application-template my-app
+cd my-app
+```
+
+## Creating our first Table
+
+The core of a Harper application is the database, so let's create a database table.
+
+A quick and expressive way to define a table is through a [GraphQL Schema](https://graphql.org/learn/schema). Using your editor of choice, edit the file named `schema.graphql` in the root of the application directory, `my-app`, that we created above. To create a table, we will need to add a `type` of `@table` named `Dog` (and you can remove the example table in the template):
+
+```graphql
+type Dog @table {
+	# properties will go here soon
+}
+```
+
+And then we'll add a primary key named `id` of type `ID`:
+
+_(Note: A GraphQL schema is a fast method to define tables in Harper, but you are by no means required to use GraphQL to query your application, nor should you necessarily do so.)_
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+}
+```
+
+Now we tell Harper to run this as an application:
+
+```bash
+harperdb dev . # tell the Harper CLI to run the current directory as an application in dev mode
+```
+
+Harper will now create the `Dog` table and the `id` attribute we just defined. Not only is this an easy way to create a table, but this schema is included in our application, which will ensure that this table exists wherever we deploy this application (to any Harper instance).
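+
+If you want to confirm that the table was created, one quick check is to describe it through the operations API. This is a sketch; it assumes the default operations API port of 9925 and that application tables are created in the default `data` database:
+
+```json
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+	"operation": "describe_table",
+	"database": "data",
+	"table": "Dog"
+}
+```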
+
+## Adding Attributes to our Table
+
+Next, let's expand our `Dog` table by adding additional typed attributes for dog `name`, `breed`, and `age`.
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+}
+```
+
+This will ensure that new records must have these properties with these types.
+
+Because we ran `harperdb dev .` earlier (dev mode), Harper is now monitoring the contents of our application directory for changes and reloading when they occur. This means that once we save our schema file with these new attributes, Harper will automatically reload our application, read `my-app/schema.graphql`, and update the `Dog` table and attributes we just defined. Dev mode will also ensure that any logging or errors are immediately displayed in the console (rather than only in the log file).
+
+As a document database, Harper supports heterogeneous records, so you can freely specify additional properties on any record. If you do want to restrict the records to only defined properties, you can always do that by adding the `@sealed` directive:
+
+```graphql
+type Dog @table @sealed {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+	tricks: [String]
+}
+```
+
+## Adding an Endpoint
+
+Now that we have a running application with a database (with data, if you imported any), let's make this data accessible from a RESTful URL by adding an endpoint. To do this, we simply add the `@export` directive to our `Dog` table:
+
+```graphql
+type Dog @table @export {
+	id: ID @primaryKey
+	name: String
+	breed: String
+	age: Int
+	tricks: [String]
+}
+```
+
+By default the application HTTP server port is `9926` (this can be [configured here](../deployments/configuration#http)), so the local URL would be `http://localhost:9926/Dog/` with a full REST API. We can PUT or POST data into this table using this new path, and then GET or DELETE from it as well (you can even view data directly from the browser). If you have not added any records yet, we can use a PUT or POST to add a record. PUT is appropriate if you know the id, and POST can be used to assign an id:
+
+```json
+POST /Dog/
+Content-Type: application/json
+
+{
+	"name": "Harper",
+	"breed": "Labrador",
+	"age": 3,
+	"tricks": ["sits"]
+}
+```
+
+With this, a record will be created and the auto-assigned id will be available through the `Location` header. If you added a record, you can visit the path `/Dog/` to view that record. Alternatively, the command `curl http://localhost:9926/Dog/` will achieve the same thing.
+
+## Authenticating Endpoints
+
+Now that you've created your first API endpoints, it's important to ensure they're protected. Without authentication, anyone could potentially access, misuse, or overload your APIs, whether by accident or malicious intent. Authentication verifies who is making the request and enables you to control access based on identity, roles, or permissions. It’s a foundational step in building secure, reliable applications.
+
+Endpoints created with Harper automatically support `Basic`, `Cookie`, and `JWT` authentication methods. See the documentation on [security](../developers/security/) for more information on different levels of access.
+
+By default, Harper also automatically authorizes all requests from loopback IP addresses (from the same computer) as the superuser, to make it simple to interact for local development. If you want to test authentication/authorization, or enforce stricter security, you may want to disable the [`authentication.authorizeLocal` setting](../deployments/configuration#authentication).
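+
+For example, you can exercise Basic authentication from the command line. This is a sketch assuming the default local port; the credentials shown are the placeholder defaults from the install guide, so substitute the username and password you configured:
+
+```bash
+curl -u HDB_ADMIN:password http://localhost:9926/Dog/
+```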
+
+### Content Negotiation
+
+These endpoints support various content types, including `JSON`, `CBOR`, `MessagePack`, and `CSV`. Simply include an `Accept` header in your requests with the preferred content type. We recommend `CBOR` as a compact, efficient encoding with rich data types, but `JSON` is familiar and great for web application development, and `CSV` can be useful for exporting data to spreadsheets or other processing.
+
+Harper works with other important standard HTTP headers as well, and these endpoints are even capable of caching interaction:
+
+```
+Authorization: Basic
+Accept: application/cbor
+If-None-Match: "etag-id" # browsers can automatically provide this
+```
+
+## Querying
+
+Querying your application database is straightforward and easy, as tables exported with the `@export` directive are automatically exposed via [REST endpoints](../developers/rest). Simple queries can be crafted through [URL query parameters](https://en.wikipedia.org/wiki/Query_string).
+
+In order to maintain reasonable query speed on a database as it grows in size, it is critical to select and establish the proper indexes. So, before we add the `@export` declaration to our `Dog` table and begin querying it, let's take a moment to target some table properties for indexing. We'll use `name` and `breed` as indexed table properties on our `Dog` table. All we need to do to accomplish this is tag these properties with the `@indexed` directive:
+
+```graphql
+type Dog @table {
+	id: ID @primaryKey
+	name: String @indexed
+	breed: String @indexed
+	owner: String
+	age: Int
+	tricks: [String]
+}
+```
+
+And finally, we'll add the `@export` directive to expose the table as a RESTful endpoint:
+
+```graphql
+type Dog @table @export {
+	id: ID @primaryKey
+	name: String @indexed
+	breed: String @indexed
+	owner: String
+	age: Int
+	tricks: [String]
+}
+```
+
+Now we can start querying. Again, we simply access the endpoint with query parameters (basic GET requests), like:
+
+```
+http://localhost:9926/Dog/?name=Harper
+http://localhost:9926/Dog/?breed=Labrador
+http://localhost:9926/Dog/?breed=Husky&name=Balto&select(id,name,breed)
+```
+
+Congratulations, you have now created a secure database application backend with a table, a well-defined structure, access controls, and a functional REST endpoint with query capabilities! See the [REST documentation for more information on HTTP access](../developers/rest) and see the [Schema reference](../developers/applications/defining-schemas) for more options for defining schemas.
+
+> Additionally, you may now use GraphQL (over HTTP) to create queries. See the documentation for that new feature [here](../technical-details/reference/graphql).
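+
+Tying the pieces together, here is a sketch of one of the queries above issued with curl, combining the REST endpoint, a query parameter, and content negotiation (assuming the defaults used throughout this guide):
+
+```bash
+curl -H "Accept: application/json" "http://localhost:9926/Dog/?breed=Labrador"
+```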
+
+## Key Takeaway
+
+Harper's schema-driven approach means you can build production-ready APIs in minutes, not hours. Start with pure schema definitions to get 90% of your functionality, then add custom code only where needed. This gives you the best of both worlds: rapid development with the flexibility to customize when required.
diff --git a/site/versioned_docs/version-4.6/getting-started/harper-concepts.md b/site/versioned_docs/version-4.6/getting-started/harper-concepts.md
new file mode 100644
index 00000000..87734062
--- /dev/null
+++ b/site/versioned_docs/version-4.6/getting-started/harper-concepts.md
@@ -0,0 +1,30 @@
+---
+title: Harper Concepts
+---
+
+# Harper Concepts
+
+As you begin your journey with Harper, there are a few concepts and definitions that you should understand.
+
+## Components
+
+Harper components are a core Harper concept: flexible, JavaScript-based extensions of the highly extensible Harper platform. They are executed by Harper directly and have complete access to the Harper [Global APIs](../technical-details/reference/globals) (such as Resource, databases, and tables).
+
+A key aspect of components is their extensibility; components can be built on other components. For example, a [Harper Application](../developers/applications/) is a component that uses many other components. The [application template](https://github.com/HarperDB/application-template) demonstrates many of Harper's built-in components such as [rest](../technical-details/reference/components/built-in-extensions#rest) (for automatic REST endpoint generation), [graphqlSchema](../technical-details/reference/components/built-in-extensions#graphqlschema) (for table schema definitions), and many more.
+
+## Applications
+
+Applications are a subset of components that cannot be used directly and must depend on other extensions. Examples include defining schemas (using the [graphqlSchema](../technical-details/reference/components/built-in-extensions#graphqlschema) built-in extension), defining custom resources (using the [jsResource](../technical-details/reference/components/built-in-extensions#jsresource) built-in extension), hosting static files (using the [static](../technical-details/reference/components/built-in-extensions#static) built-in extension), enabling REST querying of resources (using the [rest](../technical-details/reference/components/built-in-extensions#rest) built-in extension), and running [Next.js](https://github.com/HarperDB/nextjs), [Astro](https://github.com/HarperDB/astro), or [Apollo](https://github.com/HarperDB/apollo) applications through their respective extensions.
+
+## Resources
+
+Resources in Harper encompass databases, tables, and schemas that store and structure data within the system. The concept is central to Harper's data management capabilities, with custom resources being enabled by the built-in jsResource extension. Resources represent the data layer of the Harper ecosystem and provide the foundation for data operations across applications built with the platform.
+
+## Server
+
+Harper is a multi-protocol server, handling incoming requests from clients and serving data from the data model. Harper supports multiple server protocols, with components for serving REST/HTTP (including Server-Sent Events), MQTT, WebSockets, and the Operations API (and custom server components can be added). Harper uses separate layers for the data model and the servers. The data model, which is defined with resources, can be exported and used as the source for any of the servers. A single table or other resource can then be accessed and modified through REST, MQTT, SSE, or any other server protocol, for a powerful integrated model with multiple forms of access.
+Networking in Harper handles different communication protocols, including HTTP, WebSocket, and MQTT, as well as event-driven systems. These networking capabilities enable Harper applications to communicate with other services, receive requests, send responses, and participate in real-time data exchange. The networking layer is fundamental to Harper's functionality as a versatile application platform.
+
+As you go through Harper, you will pick up more knowledge of other advanced areas along the way, but with these concepts, you're now ready to create your first application.
diff --git a/site/versioned_docs/version-4.6/getting-started/index.md b/site/versioned_docs/version-4.6/getting-started/index.md
new file mode 100644
index 00000000..841ff062
--- /dev/null
+++ b/site/versioned_docs/version-4.6/getting-started/index.md
@@ -0,0 +1,46 @@
+---
+title: Getting Started
+---
+
+# Getting Started
+
+If you're new to Harper, this section will guide you through the essential resources you need to get started.
+
+Follow the steps in this documentation to discover how Harper can simplify your backend stack, eliminate many inter-process communication delays, and achieve a more predictable and performant application experience.
+
+For more advanced concepts in Harper, see our [blog](https://www.harpersystems.dev/blog).
+
+## Harper Basics
+
+- **Install Harper**: Pick the installation method that best suits your environment
+- **What is Harper**: Learn about Harper, how it works, and some of its use cases
+- **Harper Concepts**: Learn about Harper's fundamental concepts and how they interact
diff --git a/site/versioned_docs/version-4.6/getting-started/install-harper.md b/site/versioned_docs/version-4.6/getting-started/install-harper.md
new file mode 100644
index 00000000..be315672
--- /dev/null
+++ b/site/versioned_docs/version-4.6/getting-started/install-harper.md
@@ -0,0 +1,134 @@
+---
+title: Install Harper
+---
+
+# Install Harper
+
+There are three ways to install a Harper instance: using a package manager like npm, deploying it as a Docker container, or installing offline. Below is a step-by-step tutorial for each method.
+
+## Installing via NPM
+
+Before you begin, ensure you have the [Node.js](https://nodejs.org/) LTS version or newer. Node.js comes with npm, which will be used to install Harper.
+
+Open your terminal or command prompt and install Harper globally by executing the command below. Installing globally allows the `harperdb` command to be accessible from anywhere on your machine, making it easier to manage multiple projects.
+
+```bash
+npm install -g harperdb
+```
+
+Once the installation finishes, simply start your Harper instance by running the command below in your terminal.
+
+```bash
+harperdb
+```
+
+This launches Harper as a standalone instance, where you can define your schemas, endpoints, and application logic within a single integrated environment. The first time you set this up, you will need to set your Harper destination, username, password, config, and hostname.
+
+At this point, your local Harper instance is up and running, giving you the ability to develop and test your database applications using your favorite local development tools, including debuggers and version control systems.
+
+## Installing via Docker
+
+Using Docker to run Harper is an efficient way to manage a containerized instance that encapsulates all of Harper’s functionality. First, ensure that Docker is installed and running on your system. If it isn’t, download it from the [official Docker website](https://docs.docker.com/engine/install/) and complete the installation process.
+
+Next, open your terminal and pull the latest Harper image by running the following command:
+
+```bash
+docker pull harperdb/harperdb
+```
+
+This command downloads the official Harper image from Docker Hub, ensuring you have the most recent version of the containerized instance. Once the image is downloaded, you can start a new Harper container with the following command:
+
+```bash
+docker run -d -p 9925:9925 harperdb/harperdb
+```
+
+In this command, the `-d` flag runs the container in detached mode, allowing it to operate in the background, and the `-p 9925:9925` flag maps port 9925 on your local machine to port 9925 within the container, which is Harper’s default port. This port mapping lets you interact with the Harper instance directly from your local environment.
+
+### How to Use this Image
+
+[Harper configuration settings](https://harperdb.io/docs/reference/configuration-file/) can be passed as Docker run environment variables. If no environment variables are provided, Harper will operate with default configuration settings, such as:
+
+- ROOTPATH=/home/harperdb/hdb
+- OPERATIONSAPI_NETWORK_PORT=9925
+- HDB_ADMIN_USERNAME=HDB_ADMIN
+- HDB_ADMIN_PASSWORD=password
+- LOGGING_STDSTREAMS=true
+
+These defaults allow you to quickly start an instance, though you can customize your configuration to better suit your needs.
+
+Containers created from this image store all data and Harper configuration at `/home/harperdb/hdb`. To ensure that your data persists beyond the lifecycle of a container, you should mount this directory to a directory on the container host using a Docker volume. This ensures that your database remains available and your settings are not lost when the container is stopped or removed.
+
+:::info
+Test that your Harper instance is up and running by querying `curl http://localhost:9925/health`
+:::
+
+### Example Deployments
+
+To run a Harper container in the background with persistent storage and exposed ports, you can use a command like this:
+
+```bash
+docker run -d \
+  -v <host directory path>:/home/harperdb/hdb \
+  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
+  -e HDB_ADMIN_PASSWORD=password \
+  -e THREADS=4 \
+  -p 9925:9925 \
+  -p 9926:9926 \
+  harperdb/harperdb
+```
+
+Here, the `<host directory path>` should be replaced with an actual directory path on your system where you want to store the persistent data. This command also exposes both the Harper Operations API (port 9925) and an additional HTTP port (9926).
+
+For a more advanced setup, enabling HTTPS and clustering, you can run:
+
+```bash
+docker run -d \
+  -v <host directory path>:/home/harperdb/hdb \
+  -e HDB_ADMIN_USERNAME=HDB_ADMIN \
+  -e HDB_ADMIN_PASSWORD=password \
+  -e THREADS=4 \
+  -e OPERATIONSAPI_NETWORK_PORT=null \
+  -e OPERATIONSAPI_NETWORK_SECUREPORT=9925 \
+  -e HTTP_SECUREPORT=9926 \
+  -e CLUSTERING_ENABLED=true \
+  -e CLUSTERING_USER=cluster_user \
+  -e CLUSTERING_PASSWORD=password \
+  -e CLUSTERING_NODENAME=hdb1 \
+  -p 9925:9925 \
+  -p 9926:9926 \
+  -p 9932:9932 \
+  harperdb/harperdb
+```
+
+In this setup, additional environment variables disable the insecure Operations API port and enable secure ports for HTTPS, along with clustering parameters such as the clustering user, password, and node name. Port 9932 is also exposed for Harper clustering communication.
+
+Finally, if you simply wish to check the Harper version using the container, execute:
+
+```bash
+docker run --rm harperdb/harperdb /bin/bash -c "harperdb version"
+```
+
+This command runs the container momentarily to print the version information, then removes the container automatically when finished.
+
+### Logs and Troubleshooting
+
+To verify that the container is running properly, you can check your running containers with:
+
+```bash
+docker ps
+```
+
+If you want to inspect the logs to ensure that Harper has started correctly, use this command (be sure to replace `<container id>` with the actual ID from the previous command):
+
+```bash
+docker logs <container id>
+```
+
+Once verified, you can access your Harper instance by opening your web browser and navigating to http://localhost:9925 (or the appropriate port based on your configuration).
+
+### Raw binary installation
+
+There is another way to install Harper: you can choose your version, download the npm package, and install it directly (you’ll still need Node.js and npm). Click [this link](https://products-harperdb-io.s3.us-east-2.amazonaws.com/index.html) to download the package. Once you’ve downloaded the .tgz file, run the following commands from the directory where you’ve placed it:
+
+```bash
+npm install -g harperdb-X.X.X.tgz
+harperdb install
+```
diff --git a/site/versioned_docs/version-4.6/getting-started/what-is-harper.md b/site/versioned_docs/version-4.6/getting-started/what-is-harper.md
new file mode 100644
index 00000000..84071733
--- /dev/null
+++ b/site/versioned_docs/version-4.6/getting-started/what-is-harper.md
@@ -0,0 +1,59 @@
+---
+title: What is Harper
+---
+
+# What is Harper
+
+:::info
+[Connect with our team!](https://www.harpersystems.dev/contact)
+:::
+
+## What is Harper? Performance, Simplicity, and Scale.
+
+Harper is an all-in-one backend technology that fuses database technologies, caching, application hosting, and messaging functions into a single system. Unlike traditional architectures, where each piece runs independently and incurs extra costs and latency from serialization and network operations between processes, Harper systems can handle workloads seamlessly and efficiently.
+
+Harper simplifies scaling with clustering and native data replication. At scale, architectures tend to include 4 to 16 redundant, geo-distributed nodes located near every user population center. This ensures that every user experiences minimal network latency and maximum reliability in addition to the already rapid server responses.
+
+![](/img/v4.6/harperstack.jpg)
+
+## Understanding the Paradigm Shift
+
+Have you ever combined MongoDB with Redis, Next.js with Postgres, or perhaps Fastify with anything else? The options seem endless. It turns out that the cost of serialization, network hops, and intermediary processes in these systems adds up to 50% of the total system resources used (often more). Not to mention the hundreds of milliseconds of latency they can add.
+
+What we realized is that networking systems together in this way is inefficient and only necessary because a fused technology did not exist. So, we built Harper, a database fused with a complete JavaScript application system. It’s not only orders of magnitude more performant than separated systems, but it’s also easier to deploy and manage at scale.
+
+## Build With Harper
+
+Start by running Harper locally with [npm](https://www.npmjs.com/package/harperdb) or [Docker](https://hub.docker.com/r/harperdb/harperdb).
+
+Since technology tends to be built around the storage, processing, and transfer of data, start by [defining your schema](../developers/applications/#creating-our-first-table) with the `schema.graphql` file in the root of the application directory.
+
+If you would like to [query](../developers/applications/#adding-an-endpoint) this data, add the `@export` directive to your data schema and test out the [REST](../developers/rest), [MQTT](../developers/real-time#mqtt), or [WebSocket](../developers/real-time#websockets) endpoints.
+
+When you are ready for something a little more advanced, start [customizing your application](../developers/applications/#custom-functionality-with-javascript).
+
+Finally, when it’s time to deploy, explore [replication](../developers/replication/) between nodes.
+
+If you would like to jump into the most advanced capabilities, learn about [components](../technical-details/reference/components/).
+
+:::warning
+Need help? Please don’t hesitate to [reach out](https://www.harpersystems.dev/contact).
+:::
+
+## Popular Use Cases
+
+With so much functionality built in, the use cases span nearly all application systems. Some of the most popular are listed below, motivated by new levels of performance and system simplicity.
+
+### Online Catalogs & Content Delivery
+
+For use cases like e-commerce, real estate listings, and content-oriented sites, Harper’s breakthroughs in performance and distribution pay dividends in the form of better SEO and higher conversion rates. One common implementation leverages Harper’s [Next.js Component](https://github.com/HarperDB/nextjs) to host modern, performant frontend applications. Other implementations leverage the built-in caching layer and JavaScript application system to [server-side render pages](https://www.harpersystems.dev/development/tutorials/server-side-rendering-with-multi-tier-cache) that remain fully responsive because of built-in WebSocket connections.
+
+### Data Delivery Networks
+
+For use cases like real-time sports updates, flight tracking, and zero-day software update distribution, Harper is rapidly gaining popularity. Harper’s ability to receive and broadcast messages while simultaneously handling application logic and data storage streamlines operations and eliminates the need for multiple separate systems. To build an understanding of our messaging system function, refer to our [real-time documentation](../developers/real-time).
+
+### Edge Inference Systems
+
+Capturing, storing, and processing real-time data streams from client and IoT systems typically requires a stack of technology. Harper’s selective data replication and self-healing connections make for an ideal multi-tier system where edge and cloud systems both run Harper, making everything more performant.
+
+[We’re happy](https://www.harpersystems.dev/contact) to walk you through how to do this.
diff --git a/site/versioned_docs/version-4.6/index.md b/site/versioned_docs/version-4.6/index.md
new file mode 100644
index 00000000..98e5f5d0
--- /dev/null
+++ b/site/versioned_docs/version-4.6/index.md
@@ -0,0 +1,104 @@
+---
+title: Harper Docs
+---
+
+# Harper Docs
+
+:::info
+[Connect with our team!](https://www.harpersystems.dev/contact)
+:::
+
+Welcome to the Harper Documentation! Here you'll find all things Harper: everything you need to get started, troubleshoot issues, and make the most of our platform.
+
+## Getting Started
+
+- **Install Harper**: Pick the installation method that best suits your environment
+- **What is Harper**: Learn about Harper, how it works, and some of its use cases
+- **Harper Concepts**: Learn about Harper's fundamental concepts and how they interact
+
+## Building with Harper
+
+- **Harper Applications**: Build a fully featured Harper Component with custom functionality
+- **REST Queries**: The recommended HTTP interface for data access, querying, and manipulation
+- **Operations API**: Configure, deploy, administer, and control your Harper instance
+- **Clustering & Replication**: The process of connecting multiple Harper databases together to create a database mesh network that enables users to define data replication patterns
+- **Explore the Harper Studio**: The web-based GUI for Harper. Studio enables you to administer, navigate, and monitor all of your Harper instances in a simple, user-friendly interface
diff --git a/site/versioned_docs/version-4.6/technical-details/_category_.json b/site/versioned_docs/version-4.6/technical-details/_category_.json
new file mode 100644
index 00000000..69ce80a6
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/_category_.json
@@ -0,0 +1,12 @@
+{
+  "label": "Technical Details",
+  "position": 4,
+  "link": {
+    "type": "generated-index",
+    "title": "Technical Details Documentation",
+    "description": "Reference documentation and technical specifications",
+    "keywords": [
+      "technical-details"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/analytics.md b/site/versioned_docs/version-4.6/technical-details/reference/analytics.md
new file mode 100644
index 00000000..39c92109
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/analytics.md
@@ -0,0 +1,117 @@
+---
+title: Analytics
+---
+
+# Analytics
+
+Harper provides extensive telemetry and analytics data to help monitor the status of the server and its workloads, to help understand traffic and usage patterns, to identify issues and scaling needs, and to identify queries and actions that consume the most resources.
+
+Harper collects statistics for all operations, URL endpoints, and messaging topics, aggregating information by thread, operation, resource, and method in real-time. These statistics are logged in the `hdb_raw_analytics` and `hdb_analytics` tables in the `system` database.
+
+There are two "levels" of analytics in the Harper analytics tables. The first is the immediate level of raw, direct logging of real-time statistics. These analytics entries are recorded once a second (when there is activity) by each thread, and include all recorded activity in the last second, along with system resource information. The records have a primary key that is the timestamp in milliseconds since epoch. This can be queried (with `superuser` permission) using the `search_by_conditions` operation on the `hdb_raw_analytics` table (this example searches ten seconds' worth of analytics):
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+  "operation": "search_by_conditions",
+  "schema": "system",
+  "table": "hdb_raw_analytics",
+  "conditions": [{
+    "search_attribute": "id",
+    "search_type": "between",
+    "search_value": [1688594000000, 1688594010000]
+  }]
+}
+```
+
+And a typical response looks like:
+
+```
+{
+  "time": 1688594390708,
+  "period": 1000.8336279988289,
+  "metrics": [
+    {
+      "metric": "bytes-sent",
+      "path": "search_by_conditions",
+      "type": "operation",
+      "median": 202,
+      "mean": 202,
+      "p95": 202,
+      "p90": 202,
+      "count": 1
+    },
+    ...
+    {
+      "metric": "memory",
+      "threadId": 2,
+      "rss": 1492664320,
+      "heapTotal": 124596224,
+      "heapUsed": 119563120,
+      "external": 3469790,
+      "arrayBuffers": 798721
+    },
+    {
+      "metric": "utilization",
+      "idle": 138227.52767700003,
+      "active": 70.5066209952347,
+      "utilization": 0.0005098165086230495
+    }
+  ],
+  "threadId": 2,
+  "totalBytesProcessed": 12182820,
+  "id": 1688594390708.6853
+}
+```
+
+The second level of analytics recording is aggregate data. The aggregate records are recorded once a minute, aggregating the results from all the per-second entries from all the threads into a per-minute summary of statistics. The ids for these records are also timestamps in milliseconds since epoch, and they can be queried from the `hdb_analytics` table. You can query these with an operation like:
+
+```
+POST http://localhost:9925
+Content-Type: application/json
+
+{
+  "operation": "search_by_conditions",
+  "schema": "system",
+  "table": "hdb_analytics",
+  "conditions": [{
+    "search_attribute": "id",
+    "search_type": "between",
+    "search_value": [1688194100000, 1688594990000]
+  }]
+}
+```
+
+And a summary record looks like:
+
+```
+{
+  "period": 60000,
+  "metric": "bytes-sent",
+  "method": "connack",
+  "type": "mqtt",
+  "median": 4,
+  "mean": 4,
+  "p95": 4,
+  "p90": 4,
+  "count": 1,
+  "id": 1688589569646,
+  "time": 1688589569646
+}
+```
+
+The following are general resource usage statistics that are tracked:
+
+- memory - This includes RSS, heap, buffer, and external data usage.
+- utilization - How much of the time the worker was processing requests.
+- mqtt-connections - The number of MQTT connections.
+
+The following types of information are tracked for each HTTP request:
+
+- success - How many requests returned a successful response (20x response code).
+- TTFB - Time to first byte in the response to the client.
+- transfer - Time to finish the transfer of the data to the client.
+- bytes-sent - How many bytes of data were sent to the client.
+
+Requests are categorized by operation name for the operations API, by resource name for the REST API, and by command for the MQTT interface.
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/architecture.md b/site/versioned_docs/version-4.6/technical-details/reference/architecture.md
new file mode 100644
index 00000000..4155d5ff
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/architecture.md
@@ -0,0 +1,42 @@
+---
+title: Architecture
+---
+
+# Architecture
+
+Harper's architecture consists of resources, which include tables and user-defined data sources and extensions, and server interfaces, which include the RESTful HTTP interface, operations API, and MQTT. Servers are supported by routing and auth services.
+
+```
+     ┌──────────┐          ┌──────────┐
+     │ Clients  │          │ Clients  │
+     └────┬─────┘          └────┬─────┘
+          │                     │
+          ▼                     ▼
+┌────────────────────────────────────────┐
+│                                        │
+│        Socket routing/management       │
+├───────────────────────┬────────────────┤
+│                       │                │
+│  Server Interfaces  ─►│ Authentication │
+│  RESTful HTTP, MQTT   │ Authorization  │
+│                     ◄─┤                │
+│   ▲              ▲    └────────────────┤
+│   │              │            │        │
+├───┼──────────────┼────────────┼────────┤
+│   │              │            ▲        │
+│   ▼  Resources   ▲            │        │
+│   │              │      ┌───────────┐  │
+├───┴──────────────┴───┐  │    App    │  │
+│                      ├─►│ resources │  │
+│   Database tables    │  └───────────┘  │
+│                      │        ▲        │
+├──────────────────────┘        │        │
+│        ▲                      ▼        │
+│  ┌────────────────┐           │        │
+│  │    External    │           │        │
+│  │  data sources  ├───────────┘        │
+│  │                │                    │
+│  └────────────────┘                    │
+│                                        │
+└────────────────────────────────────────┘
+```
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/blob.md b/site/versioned_docs/version-4.6/technical-details/reference/blob.md
new file mode 100644
index 00000000..c747fd28
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/blob.md
@@ -0,0 +1,106 @@
+---
+title: Blob
+---
+
+# Blob
To use blobs, you would generally want to declare a field as a `Blob` type in your schema: + +```graphql +type MyTable { + id: Any! @primaryKey + data: Blob +} +``` + +You can then create a blob, which writes the binary data to disk, and include it (as a reference) in a record. For example, you can create a record with a blob like: + +```javascript +let blob = await createBlob(largeBuffer); +await MyTable.put({ id: 'my-record', data: blob }); +``` + +The `data` attribute in this example is a blob reference, and can be used like any other attribute in the record, but it is stored separately, and the data must be accessed asynchronously. You can retrieve the blob data with the standard `Blob` methods: + +```javascript +let buffer = await blob.bytes(); +``` + +If you are creating a resource method, you can return a `Response` object with a blob as the body: + +```javascript +export class MyEndpoint extends MyTable { + async get() { + return { + status: 200, + headers: {}, + body: this.data, // this.data is a blob + }; + } +} +``` + +One of the important characteristics of blobs is that they natively support asynchronous streaming of data. This is important for both creation and retrieval of large data. When we create a blob with `createBlob`, the returned blob will have a storage entry created for it, but the data is streamed to storage. This means that you can create a blob from a buffer or from a stream. You can also create a record that references a blob before the blob is fully written to storage. For example, you can create a blob from a stream: + +```javascript +let blob = await createBlob(stream); +// at this point the blob exists, but the data is still being written to storage +await MyTable.put({ id: 'my-record', data: blob }); +// we now have written a record that references the blob +let record = await MyTable.get('my-record'); +// we now have a record that gives us access to the blob. We can asynchronously access the blob's data or stream it, and the data will become available as the stream is written to the blob. +let stream = record.data.stream(); +``` + +This can be powerful functionality for large media content, where content can be streamed into storage and simultaneously streamed out to users in real time as it is received. +Alternatively, we can wait for the blob to be fully written to storage before creating a record that references it: + +```javascript +let blob = await createBlob(stream); +// at this point the blob exists, but the data has not been fully written to storage +await blob.save(MyTable); +// we now know the blob is fully written to storage +await MyTable.put({ id: 'my-record', data: blob }); +``` + +Note that this means that blobs are _not_ atomic or [ACID](https://en.wikipedia.org/wiki/ACID) compliant; streaming functionality is intentionally the opposite of ACID/atomic writes, which would prevent access to data as it is being written. + +### Error Handling + +Because blobs can be streamed and referenced prior to their completion, there is a chance that an error or interruption could occur while streaming data to the blob (after the record is committed).
We can create an error handler for the blob to handle the case of an interrupted blob: + +```javascript +export class MyEndpoint extends MyTable { + async get() { + let blob = this.data; + blob.on('error', () => { + // if this was a caching table, we may want to invalidate or delete this record: + this.invalidate(); + }); + return { + status: 200, + headers: {}, + body: blob, + }; + } +} +``` + +### Blob `size` + +Blobs that are created from streams may not have the standard `size` property available, because the size may not be known while data is being streamed. Consequently, the `size` property may be undefined until the size is determined. You can listen for the `size` event to be notified when the size is available: + +```javascript +let record = await MyTable.get('my-record'); +let blob = record.data; +blob.size; // will be available if the blob was saved with a known size +let stream = blob.stream(); // start streaming the data +if (blob.size === undefined) { + blob.on('size', (size) => { + // will be called once the size is available + }); +} +``` + +See the [configuration](../../deployments/configuration) documentation for more information on configuring where blobs are stored. diff --git a/site/versioned_docs/version-4.6/technical-details/reference/components/applications.md b/site/versioned_docs/version-4.6/technical-details/reference/components/applications.md new file mode 100644 index 00000000..524a7e08 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/reference/components/applications.md @@ -0,0 +1,183 @@ +--- +title: Applications +--- + +# Applications + +> The contents of this page predominantly relate to **application** components. Extensions are not necessarily _deployable_. The ambiguity of the term "components" is being worked on and will be improved in future releases. As we work to clarify the terminology, please keep in mind that the component operations are synonymous with application management. In general, "components" is the general term for both applications and extensions, but in the context of the operations API it refers to applications only. + +Harper offers several approaches to managing applications that differ between local development and Harper-managed instances. This page will cover the recommended methods of developing, installing, deploying, and running Harper applications. + +## Local Development + +Harper is designed to be simple to run locally. Generally, Harper should be installed locally on a machine using a global package manager install (e.g. `npm i -g harperdb`). + +> Before continuing, ensure Harper is installed and the `harperdb` CLI is available. For more information, review the [installation guide](../../../deployments/install-harper/). + +When developing an application locally, there are a number of ways to run it on Harper. + +### `dev` and `run` commands + +The quickest way to run an application is by using the `dev` command within the application directory. + +The `harperdb dev .` command will automatically watch for file changes within the application directory and restart the Harper threads when changes are detected. + +The `dev` command will **not** restart the main thread; if this is a requirement, switch to using `run` instead and manually start/stop the process to execute the main thread. + +Stop execution for either of these processes by sending a SIGINT (generally CTRL/CMD+C) signal to the process.
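+ +For example (assuming an application directory named `my-app`; the name is illustrative), a typical local development loop looks like: + +```sh +cd my-app +# start Harper and watch this directory for changes +harperdb dev . +# edit files; the worker threads restart automatically when changes are detected +# press CTRL/CMD+C (SIGINT) to stop +```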
+ +### Deploying to a local Harper instance + +Alternatively, to mimic interfacing with a hosted Harper instance, use operation commands instead. + +1. Start up Harper with `harperdb` +1. _Deploy_ the application to the local instance by executing: + + ```sh + harperdb deploy \ + project=<project-name> \ + package=<path-to-application> \ + restart=true + ``` + - Make sure to omit the `target` option so that it _deploys_ to the Harper instance running locally + - The `package=<path-to-application>` option creates a symlink to the application, simplifying restarts + - By default, the `deploy` operation command will _deploy_ the current directory by packaging it up and streaming the bytes. By specifying `package`, it skips this and references the file path directly + - The `restart=true` option automatically restarts Harper threads after the application is deployed + - If set to `'rolling'`, a rolling restart will be triggered after the application is deployed + +1. In another terminal, use the `harperdb restart` command to restart the instance's threads at any time + - With `package=<path-to-application>`, the application source is symlinked, so changes will automatically be picked up between restarts + - If `package` was omitted, run the `deploy` command again with any new changes +1. To remove the application use `harperdb drop_component project=<project-name>` + +Similar to the previous section, if the main thread needs to be restarted, start and stop the Harper instance manually (with the application deployed). Upon Harper startup, the application will automatically be loaded and executed across all threads. + +> Not all [component operations](../../../developers/operations-api/components) are available via CLI. When in doubt, switch to using the Operations API via network requests to the local Harper instance. + +For example, to properly _deploy_ a `test-application` locally, the command would look like: + +```sh +harperdb deploy \ + project=test-application \ + package=/Users/dev/test-application \ + restart=true +``` + +> If the current directory is the application directory, use a shortcut such as `package=$(pwd)` to avoid typing out the complete path. + +Keep in mind that using a local file path for `package` will only work locally; deploying to a remote instance requires a different approach. + +## Remote Management + +Managing applications on a remote Harper instance is best accomplished through [component operations](../../../developers/operations-api/components), similar to using the `deploy` command locally. Before continuing, always back up critical Harper instances. Managing, deploying, and executing applications can directly impact a live system. + +Remote Harper instances work very similarly to local Harper instances. The primary application management operations still include `deploy_component`, `drop_component`, and `restart`. + +The key to remote management is specifying a remote `target` along with appropriate username/password values. These can all be specified using CLI arguments: `target`, `username`, and `password`. Alternatively, the `CLI_TARGET_USERNAME` and `CLI_TARGET_PASSWORD` environment variables can replace the `username` and `password` arguments.
All together: + +```sh +harperdb deploy \ + project=<project-name> \ + package=<package-specifier> \ + username=<username> \ + password=<password> \ + target=<instance-url> \ + restart=true \ + replicated=true +``` + +Or, using environment variables: + +```sh +export CLI_TARGET_USERNAME=<username> +export CLI_TARGET_PASSWORD=<password> +harperdb deploy \ + project=<project-name> \ + package=<package-specifier> \ + target=<instance-url> \ + restart=true \ + replicated=true +``` + +Unlike local development, where `package` should be set to a local file path for symlinking and an improved development experience, `package` supports some additional options when deploying remotely. + +A local application can be deployed to a remote instance by **omitting** the `package` field. Harper will automatically package the local directory and include that along with the rest of the deployment operation. + +Furthermore, the `package` field can be set to any valid [npm dependency value](https://docs.npmjs.com/cli/v11/configuring-npm/package-json#dependencies). + +- For applications deployed to npm, specify the package name: `package="@harperdb/status-check"` +- For applications on GitHub, specify the URL: `package="https://github.com/HarperDB/status-check"`, or the shorthand `package=HarperDB/status-check` +- Private repositories also work if the correct SSH keys are on the server: `package="git+ssh://git@github.com:HarperDB/secret-applications.git"` + - Reference the [SSH Key](../../../developers/operations-api/components#add-ssh-key) operations for more information on managing SSH keys on a remote instance +- Even tarball URLs are supported: `package="https://example.com/application.tar.gz"` + +> When using git tags, we highly recommend that you use the semver directive to ensure consistent and reliable installation by npm. In addition to tags, you can also reference branches or commit numbers. + +These `package` values are all supported because, behind the scenes, Harper generates a `package.json` file for the components. Then, it uses a form of `npm install` to resolve them as dependencies. This is why symlinks are generated when specifying a file path locally. The following [Advanced](#advanced) section explores this pattern in more detail. + +Finally, don't forget to include `restart=true`, or run `harperdb restart target=<instance-url>`. + +## Advanced + +The following methods are advanced and should be executed with caution as they can have unintended side effects. Always back up any critical Harper instances before continuing. + +First, locate the Harper installation `rootPath` directory. Generally, this is `~/hdb`. It can be retrieved by running `harperdb get_configuration` and looking for the `rootPath` field. + +> For a useful shortcut on POSIX-compliant machines run: `harperdb get_configuration json=true | jq ".rootPath" | sed 's/"//g'` + +This path is the Harper instance. Within this directory, locate the root config titled `harperdb-config.yaml`, and the components root path. The components root path will be `<rootPath>/components` by default (thus, `~/hdb/components`), but it can also be configured. If necessary, use `harperdb get_configuration` again and look for the `componentsRoot` field for the exact path. + +### Adding components to root + +Similar to how components can specify other components within their `config.yaml`, applications can be added to Harper by adding them to the `harperdb-config.yaml`. + +The configuration is very similar to that of `config.yaml`. Entries consist of a top-level `<name>:` key and an indented `package: <specifier>` field. Any additional component options can also be included as indented fields.
```yaml +status-check: + package: '@harperdb/status-check' +``` + +The key difference between this and a component's `config.yaml` is that the name does **not** need to be associated with a `package.json` dependency. When Harper starts up, it transforms these configurations into a `package.json` file, and then executes a form of `npm install`. Thus, the `package: <specifier>` value can be any valid dependency syntax; npm packages, GitHub repos, tarballs, and local directories are all supported. + +Given a root config like: + +```yaml +myGithubComponent: + package: HarperDB-Add-Ons/package#v2.2.0 # install from GitHub +myNPMComponent: + package: harperdb # install from npm +myTarBall: + package: /Users/harper/cool-component.tar # install from tarball +myLocal: + package: /Users/harper/local # install from local path +myWebsite: + package: https://harperdb-component # install from URL +``` + +Harper will generate a `package.json` like: + +```json +{ + "dependencies": { + "myGithubComponent": "github:HarperDB-Add-Ons/package#v2.2.0", + "myNPMComponent": "npm:harperdb", + "myTarBall": "file:/Users/harper/cool-component.tar", + "myLocal": "file:/Users/harper/local", + "myWebsite": "https://harperdb-component" + } +} +``` + +npm will install all the components and store them in `<componentsRoot>`. A symlink back to `node_modules` is also created for dependency resolution purposes. + +The package prefix is automatically added; however, you can set it manually in your package reference. + +```yaml +myCoolComponent: + package: file:/Users/harper/cool-component.tar +``` + +By specifying a file path, npm will generate a symlink, and changes will then be automatically picked up between restarts. diff --git a/site/versioned_docs/version-4.6/technical-details/reference/components/built-in-extensions.md b/site/versioned_docs/version-4.6/technical-details/reference/components/built-in-extensions.md new file mode 100644 index 00000000..ad2b8ff2 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/reference/components/built-in-extensions.md @@ -0,0 +1,188 @@ +--- +title: Built-In Extensions +--- + +# Built-In Extensions + +Harper provides extended features using built-in extensions. They do **not** need to be installed with a package manager, and simply must be specified in a config to run. These are used throughout many Harper docs, guides, and examples. Unlike custom extensions, which have their own semantic versions, built-in extensions follow Harper's semantic version. + +For more information, read the [Components, Applications, and Extensions](../../../developers/applications/) documentation section. + +- [Built-In Extensions](#built-in-extensions) + - [dataLoader](#dataloader) + - [fastifyRoutes](#fastifyroutes) + - [graphql](#graphql) + - [graphqlSchema](#graphqlschema) + - [jsResource](#jsresource) + - [loadEnv](#loadenv) + - [rest](#rest) + - [roles](#roles) + - [static](#static) + +## dataLoader + +Load data from JSON or YAML files into Harper tables as part of component deployment. + +This component is an [Extension](..#extensions) and can be configured with the `files` configuration option. + +Complete documentation for this feature is available here: [Data Loader](../../../developers/applications/data-loader) + +```yaml +dataLoader: + files: 'data/*.json' +``` + +## fastifyRoutes + +Specify custom endpoints using [Fastify](https://fastify.dev/).
This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options. + +Complete documentation for this feature is available here: [Define Fastify Routes](../../../developers/applications/define-routes) + +```yaml +fastifyRoutes: + files: 'routes/*.js' +``` + +## graphql + +> GraphQL querying is **experimental**, and only partially implements the GraphQL Over HTTP / GraphQL specifications. + +Enables GraphQL querying via a `/graphql` endpoint loosely implementing the GraphQL Over HTTP specification. + +Complete documentation for this feature is available here: [GraphQL](../graphql) + +```yaml +graphql: true +``` + +## graphqlSchema + +Specify schemas for Harper tables and resources via GraphQL schema syntax. + +This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options. + +Complete documentation for this feature is available here: [Defining Schemas](../../../developers/applications/defining-schemas) + +```yaml +graphqlSchema: + files: 'schemas.graphql' +``` + +## jsResource + +Specify custom, JavaScript-based Harper resources. + +Refer to the Application [Custom Functionality with JavaScript](../../../developers/applications/#custom-functionality-with-javascript) guide, or [Resource Class](../resources/) reference documentation for more information on custom resources. + +This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options. + +```yaml +jsResource: + files: 'resource.js' +``` + +## loadEnv + +Load environment variables via files like `.env`. + +This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options. + +Ensure this component is specified first in `config.yaml` so that environment variables are loaded prior to loading any other components. + +```yaml +loadEnv: + files: '.env' +``` + +This component matches the default behavior of dotenv, where existing variables take precedence. Specify the `override` option in order to override existing environment variables assigned to `process.env`: + +```yaml +loadEnv: + files: '.env' + override: true +``` + +> Important: Harper is a single-process application. Environment variables are loaded onto `process.env` and will be shared throughout all Harper components. This means environment variables loaded by one component will be available to other components (as long as the components are loaded in the correct order). + +## rest + +Enable automatic REST endpoint generation for exported resources with this component. + +Complete documentation for this feature is available here: [REST](../../../developers/rest) + +```yaml +rest: true +``` + +This component supports additional options: + +To enable `Last-Modified` header support: + +```yaml +rest: + lastModified: true +``` + +To disable automatic WebSocket support: + +```yaml +rest: + webSocket: false +``` + +## roles + +Specify roles for Harper tables and resources.
This component is a [Resource Extension](./extensions#resource-extension) and can be configured with the [`files` and `urlPath`](./extensions#resource-extension-configuration) configuration options. + +Complete documentation for this feature is available here: [Defining Roles](../../../developers/applications/defining-roles) + +```yaml +roles: + files: 'roles.yaml' +``` + +## static + +Specify files to serve statically from the Harper HTTP endpoint. + +Use the [Resource Extension](./extensions#resource-extension) configuration options [`files` and `urlPath`](./extensions#resource-extension-configuration) to specify the files to be served. + +As specified by Harper's Resource Extension docs, the `files` option can be any glob pattern or a glob options object. This extension will serve all files matching the pattern, so make sure to be specific. + +To serve the entire `web` directory, specify `files: 'web/**'`. + +To serve only the HTML files within `web`, specify `files: 'web/*.html'` or `files: 'web/**/*.html'`. + +The `urlPath` option is the base URL path entries will be resolved to. For example, a `urlPath: 'static'` will serve all files resolved from `files` on the URL path `localhost/static/`. + +Given the `config.yaml`: + +```yaml +static: + files: 'web/*.html' + urlPath: 'static' +``` + +And the file directory structure: + +``` +component/ +├─ web/ +│ ├─ index.html +│ ├─ blog.html +├─ config.yaml +``` + +The HTML files will be available at `localhost/static/index.html` and `localhost/static/blog.html` respectively. diff --git a/site/versioned_docs/version-4.6/technical-details/reference/components/configuration.md b/site/versioned_docs/version-4.6/technical-details/reference/components/configuration.md new file mode 100644 index 00000000..08fa4cc2 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/reference/components/configuration.md @@ -0,0 +1,89 @@ +--- +title: Component Configuration +--- + +# Component Configuration + +> For information on the distinction between the types of components (applications and extensions), refer to the beginning of the [Applications](../../../developers/applications) documentation section. + +Harper components are configured with a `config.yaml` file located in the root of the component module directory. This file is how a component configures the other components it depends on. Each entry in the file starts with a component name, and then configuration values are indented below it. + +```yaml +name: + option-1: value + option-2: value +``` + +It is the entry's `name` that is used for component resolution. It can be one of the [built-in extensions](./built-in-extensions), or it must match a package dependency of the component as specified by `package.json`. The [Custom Component Configuration](#custom-component-configuration) section provides more details and examples. + +Some built-in extensions can be configured with as little as a top-level boolean; for example, the [rest](./built-in-extensions#rest) extension can be enabled with just: + +```yaml +rest: true +``` + +Most components generally have more configuration options. Some options are ubiquitous to the Harper platform, such as the `files` and `urlPath` options for an [extension](./extensions) or [plugin](./plugins), or `package` for any [custom component](#custom-component-configuration). + +[Extensions](./extensions) and [plugins](./plugins) require specifying the `extensionModule` or `pluginModule` option respectively.
Refer to their respective API reference documentation for more information. + +## Custom Component Configuration + +Any custom component **must** be configured with the `package` option in order for Harper to load that component. When enabled, the name of the package must match a dependency of the component. For example, to use the `@harperdb/nextjs` extension, it must first be included in `package.json`: + +```json +{ + "dependencies": { + "@harperdb/nextjs": "1.0.0" + } +} +``` + +Then, within `config.yaml` it can be enabled and configured using: + +```yaml +'@harperdb/nextjs': + package: '@harperdb/nextjs' + # ... +``` + +Since npm allows for a [variety of dependency configurations](https://docs.npmjs.com/cli/configuring-npm/package-json#dependencies), this can be used to create custom references. For example, to depend on a specific GitHub branch, first update the `package.json`: + +```json +{ + "dependencies": { + "harper-nextjs-test-feature": "HarperDB/nextjs#test-feature" + } +} +``` + +And now in `config.yaml`: + +```yaml +harper-nextjs-test-feature: + package: 'harper-nextjs-test-feature' + files: './' + # ... +``` + +## Default Component Configuration + +Harper components do not need to specify a `config.yaml`. Harper uses the following default configuration to load components. + +```yaml +rest: true +graphqlSchema: + files: '*.graphql' +roles: + files: 'roles.yaml' +jsResource: + files: 'resources.js' +fastifyRoutes: + files: 'routes/*.js' + urlPath: '.' +static: + files: 'web/**' +``` + +Refer to the [built-in components](./built-in-extensions) documentation for more information on these fields. + +If a `config.yaml` is defined, it will **not** be merged with the default config. diff --git a/site/versioned_docs/version-4.6/technical-details/reference/components/extensions.md b/site/versioned_docs/version-4.6/technical-details/reference/components/extensions.md new file mode 100644 index 00000000..b2a613b1 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/reference/components/extensions.md @@ -0,0 +1,187 @@ +--- +title: Extensions API +--- + +# Extensions API + +> As of Harper v4.6, a new iteration of the extension API, called **Plugins**, has been released. They are simultaneously a simplification and an extensibility upgrade. Plugins are **experimental**, but we encourage developers to consider developing with the [plugin API](./plugins) instead of the extension API. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported. + +There are two key types of extensions: **Resource Extensions** and **Protocol Extensions**. The key difference is that a **Protocol Extension** can return a **Resource Extension**. + +Furthermore, what distinguishes an extension from other components is that it implements at least one of the [Resource Extension](#resource-extension-api) or [Protocol Extension](#protocol-extension-api) APIs. + +All extensions must define a `config.yaml` file and declare an `extensionModule` option. This must be a path to the extension module source code. The path must resolve from the root of the module directory. + +For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) `config.yaml` specifies `extensionModule: ./extension.js`. + +If the extension is written in something other than JavaScript (such as TypeScript), ensure that the path resolves to the built version (i.e. `extensionModule: ./dist/index.js`) + +## Resource Extension + +A Resource Extension is for processing a certain type of file or directory.
For example, the built-in [jsResource](./built-in-extensions#jsresource) extension handles executing JavaScript files. + +Resource Extensions consist of four distinct function exports, [`handleFile()`](#handlefilecontents-urlpath-absolutepath-resources-void--promisevoid), [`handleDirectory()`](#handledirectoryurlpath-absolutepath-resources-boolean--void--promiseboolean--void), [`setupFile()`](#setupfilecontents-urlpath-absolutepath-resources-void--promisevoid), and [`setupDirectory()`](#setupdirectoryurlpath-absolutepath-resources-boolean--void--promiseboolean--void). The `handleFile()` and `handleDirectory()` methods are executed on **all worker threads**, and are _executed again during restarts_. The `setupFile()` and `setupDirectory()` methods are only executed **once** on the **main thread** during the initial system start sequence. + +> Keep in mind that the CLI command `harperdb restart` or CLI argument `restart=true` only restarts the worker threads. If a component is deployed using `harperdb deploy`, the code within the `setupFile()` and `setupDirectory()` methods will not be executed until the system is completely shut down and turned back on. + +Other than their execution behavior, the `handleFile()` and `setupFile()` methods, and the `handleDirectory()` and `setupDirectory()` methods, have identical function definitions (arguments and return value behavior). + +### Resource Extension Configuration + +Any [Resource Extension](#resource-extension) can be configured with the `files` and `urlPath` options. These options control how _files_ and _directories_ are resolved in order to be passed to the extension's `handleFile()`, `setupFile()`, `handleDirectory()`, and `setupDirectory()` methods. + +> Harper relies on the [fast-glob](https://github.com/mrmlnc/fast-glob) library for glob pattern matching. + +- **files** - `string | string[] | Object` - _required_ - A [glob pattern](https://github.com/mrmlnc/fast-glob?tab=readme-ov-file#pattern-syntax) string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the extension. If specified as an object, the `source` property is required. By default, Harper **matches files and directories**; this is configurable using the `only` option. + - **source** - `string | string[]` - _required_ - The glob pattern string or array of strings. + - **only** - `'all' | 'files' | 'directories'` - _optional_ - The glob pattern will match only the specified entry type. Defaults to `'all'`. + - **ignore** - `string[]` - _optional_ - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`. +- **urlPath** - `string` - _optional_ - A base URL path to prepend to the resolved `files` entries. + - If the value starts with `./`, such as `'./static/'`, the component name will be included in the base URL path + - If the value is `.`, then the component name will be the base URL path + - Note: `..` is an invalid pattern and will result in an error + - Otherwise, the value here will be the base URL path.
Leading and trailing `/` characters will be handled automatically (`/static/`, `/static`, and `static/` are all equivalent to `static`) + +For example, to configure the [static](./built-in-extensions#static) component to serve all HTML files from the `web` source directory on the `static` URL endpoint: + +```yaml +static: + files: 'web/*.html' + urlPath: 'static' +``` + +If there are files such as `web/index.html` and `web/blog.html`, they would be available at `localhost/static/index.html` and `localhost/static/blog.html` respectively. + +Furthermore, if the component is located in the `test-component` directory, and the `urlPath` was set to `'./static/'` instead, then the files would be served from `localhost/test-component/static/*` instead. + +The `urlPath` is optional; for example, to configure the [graphqlSchema](./built-in-extensions#graphqlschema) component to load all schemas within the `src/schema` directory, only specifying a `files` glob pattern is required: + +```yaml +graphqlSchema: + files: 'src/schema/*.schema' +``` + +The `files` option also supports a more complex options object. These additional fields enable finer control of the glob pattern matching. + +For example, to match files within `web`, and omit any within the `web/images` directory, the configuration could be: + +```yaml +static: + files: + source: 'web/**/*' + ignore: ['web/images'] +``` + +In order to match only files: + +```yaml +test-component: + files: + source: 'dir/**/*' + only: 'files' +``` + +### Resource Extension API + +In order for an extension to be classified as a Resource Extension, it must implement at least one of the `handleFile()`, `handleDirectory()`, `setupFile()`, or `setupDirectory()` methods. As a standalone extension, these methods should be named and exported directly. For example: + +```js +// ESM +export function handleFile() {} +export function setupDirectory() {} + +// or CJS +function handleDirectory() {} +function setupFile() {} + +module.exports = { handleDirectory, setupFile }; +``` + +When returned by a [Protocol Extension](#protocol-extension), these methods should be defined on the object instead: + +```js +export function start() { + return { + handleFile() {}, + }; +} +``` + +#### `handleFile(contents, urlPath, absolutePath, resources): void | Promise<void>` + +#### `setupFile(contents, urlPath, absolutePath, resources): void | Promise<void>` + +These methods are for processing individual files. They can be async. + +> Remember! +> +> `setupFile()` is executed **once** on the **main thread** during the main start sequence. +> +> `handleFile()` is executed on **worker threads** and is executed again during restarts. + +Parameters: + +- **contents** - `Buffer` - The contents of the file +- **urlPath** - `string` - The recommended URL path of the file +- **absolutePath** - `string` - The absolute path of the file +- **resources** - `Object` - A collection of the currently loaded resources + +Returns: `void | Promise<void>` + +#### `handleDirectory(urlPath, absolutePath, resources): boolean | void | Promise<boolean | void>` + +#### `setupDirectory(urlPath, absolutePath, resources): boolean | void | Promise<boolean | void>` + +These methods are for processing directories. They can be async. + +If the function returns or resolves a truthy value, then the component loading sequence will end and no other entries within the directory will be processed. + +> Remember! +> +> `setupDirectory()` is executed **once** on the **main thread** during the main start sequence.
+ +> +> `handleDirectory()` is executed on **worker threads** and is executed again during restarts. + +Parameters: + +- **urlPath** - `string` - The recommended URL path of the directory +- **absolutePath** - `string` - The absolute path of the directory +- **resources** - `Object` - A collection of the currently loaded resources + +Returns: `boolean | void | Promise<boolean | void>` + +## Protocol Extension + +A Protocol Extension is a more advanced form of a Resource Extension and is mainly used for implementing higher-level protocols. For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs) handles building and running a Next.js project. A Protocol Extension is particularly useful for adding custom networking handlers (see the [`server`](../globals#server) global API documentation for more information). + +### Protocol Extension Configuration + +In addition to the `files` and `urlPath` [Resource Extension configuration](#resource-extension-configuration) options, and the `package` [Custom Component configuration](#custom-component-configuration) option, Protocol Extensions can also specify additional configuration options. Any options added to the extension configuration (in `config.yaml`) will be passed through to the `options` object of the `start()` and `startOnMainThread()` methods. + +For example, the [Harper Next.js Extension](https://github.com/HarperDB/nextjs#options) specifies multiple options that can be included in its configuration. For instance, a Next.js app using `@harperdb/nextjs` may specify the following `config.yaml`: + +```yaml +'@harperdb/nextjs': + package: '@harperdb/nextjs' + files: './' + prebuilt: true + dev: false +``` + +Many protocol extensions will use the `port` and `securePort` options for configuring networking handlers. Many of the [`server`](../globals#server) global APIs accept `port` and `securePort` options, so extensions replicate this pattern for simpler pass-through. + +### Protocol Extension API + +A Protocol Extension is made up of two distinct methods, [`start()`](#startoptions-resourceextension--promiseresourceextension) and [`startOnMainThread()`](#startonmainthreadoptions-resourceextension--promiseresourceextension). Similar to a Resource Extension, the `start()` method is executed on _all worker threads_, and _executed again on restarts_. The `startOnMainThread()` method is **only** executed **once** during the initial system start sequence. These methods take an identical `options` object parameter, and can both return a Resource Extension (i.e. an object containing one or more of the methods listed above). + +#### `start(options): ResourceExtension | Promise<ResourceExtension>` + +#### `startOnMainThread(options): ResourceExtension | Promise<ResourceExtension>` + +Parameters: + +- **options** - `Object` - An object representation of the extension's configuration options. + +Returns: `Object` - An object that implements any of the [Resource Extension APIs](#resource-extension-api) diff --git a/site/versioned_docs/version-4.6/technical-details/reference/components/index.md b/site/versioned_docs/version-4.6/technical-details/reference/components/index.md new file mode 100644 index 00000000..d3cd7214 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/reference/components/index.md @@ -0,0 +1,39 @@ +--- +title: Components +--- + +# Components + +**Components** are the high-level concept for modules that extend the Harper core platform, adding additional functionality. Components encapsulate both applications and extensions.
+ +> We are actively working to disambiguate the terminology. When you see "component", such as in the Operations API or CLI, it generally refers to an application. We will do our best to clarify exactly which classification of component is meant whenever possible. + +**Applications** are best defined as the implementation of a specific user-facing feature or functionality. Applications are built on top of extensions and can be thought of as the end product that users interact with. For example, a Next.js application that serves a web interface or an Apollo GraphQL server that provides a GraphQL API are both applications. + +**Extensions** are the building blocks of the Harper component system. Applications depend on extensions to provide the functionality the application is implementing. For example, the built-in `graphqlSchema` extension enables applications to define their databases and tables using GraphQL schemas. Furthermore, the `@harperdb/nextjs` and `@harperdb/apollo` extensions are the building blocks that provide support for building Next.js and Apollo applications. + +> As of Harper v4.6, a new, **experimental** component system has been introduced, called **plugins**. Plugins are a **new iteration of the existing extension system**. They are simultaneously a simplification and an extensibility upgrade. Instead of defining multiple methods (`start` vs `startOnMainThread`, `handleFile` vs `setupFile`, `handleDirectory` vs `setupDirectory`), plugins only have to define a single `handleApplication` method. Plugins are **experimental**, and complete documentation is available on the [plugin API](./plugins) page. In time we plan to deprecate the concept of extensions in favor of plugins, but for now, both are supported. + +Altogether, the support for implementing a feature is the extension, and the actual implementation of the feature is the application. + +For more information on the differences between applications and extensions, refer to the beginning of the [Applications](../../../developers/applications/) guide documentation section. + +This technical reference section has detailed information on various component systems: + +- [Built-In Extensions](./built-in-extensions) +- [Configuration](./configuration) +- [Managing Applications](./applications) +- [Extensions](./extensions) +- [(Experimental) Plugins](./plugins) + +## Custom Applications + +- [`@harperdb/status-check`](https://github.com/HarperDB/status-check) +- [`@harperdb/prometheus-exporter`](https://github.com/HarperDB/prometheus-exporter) +- [`@harperdb/acl-connect`](https://github.com/HarperDB/acl-connect) + +## Custom Extensions + +- [`@harperdb/nextjs`](https://github.com/HarperDB/nextjs) +- [`@harperdb/apollo`](https://github.com/HarperDB/apollo) +- [`@harperdb/astro`](https://github.com/HarperDB/astro) diff --git a/site/versioned_docs/version-4.6/technical-details/reference/components/plugins.md b/site/versioned_docs/version-4.6/technical-details/reference/components/plugins.md new file mode 100644 index 00000000..8bfbdd25 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/reference/components/plugins.md @@ -0,0 +1,619 @@ +--- +title: Experimental Plugins +--- + +# Experimental Plugins + +The new, experimental **plugin** API is an iteration of the existing extension system. It simplifies the API by removing the need for multiple methods (`start`, `startOnMainThread`, `handleFile`, `setupFile`, etc.) and instead only requires a single `handleApplication` method.
Plugins are designed to be more extensible and easier to use, and they are intended to replace the concept of extensions in the future. + +Similar to the existing extension API, a plugin must specify a `pluginModule` option within `config.yaml`. This must be a path to the plugin module source code. The path must resolve from the root of the module directory. For example: `pluginModule: plugin.js`. + +If the plugin is written in something other than JavaScript (such as TypeScript), ensure that the path resolves to the built version (i.e. `pluginModule: ./dist/index.js`) + +It is also recommended that all plugins have a `package.json` that specifies JavaScript package metadata such as name, version, type, etc. Since plugins are just JavaScript packages, they can do anything a JavaScript package can normally do. They can be written in TypeScript and compiled to JavaScript. They can export an executable (using the [bin](https://docs.npmjs.com/cli/configuring-npm/package-json#bin) property). They can be published to npm. The possibilities are endless! + +The key to a plugin is the [`handleApplication()`](#function-handleapplicationscope-scope-void--promisevoid) method. It must be exported by the `pluginModule`, and cannot coexist with any of the other extension methods such as `start`, `handleFile`, etc. The component loader will throw an error if both are defined. + +The `handleApplication()` method is executed **sequentially** across all **worker threads** during the component loading sequence. It receives a single `scope` argument that contains all of the relevant metadata and APIs for interacting with the associated component. + +The method can be async and it is awaited by the component loader. + +However, it is highly recommended to avoid event-loop-blocking operations within the `handleApplication()` method. See the examples section for best practices on how to use the `scope` argument effectively. + +## Configuration + +As plugins are meant to be used by applications in order to implement some feature, many plugins provide a variety of configuration options to customize their behavior. Some plugins even require certain configuration options to be set in order to function properly. + +As a brief overview, the general configuration options available for plugins are: + +- **files** - `string` | `string[]` | [`FilesOptionsObject`](#interface-filesoptionsobject) - _optional_ - A glob pattern string or array of strings that specifies the files and directories to be handled by the plugin's default `EntryHandler` instance. +- **urlPath** - `string` - _optional_ - A base URL path to prepend to the resolved `files` entries handled by the plugin's default `EntryHandler` instance. +- **timeout** - `number` - _optional_ - The timeout in milliseconds for the plugin's operations. If not specified, the system default is **30 seconds**. Plugins may override the system default themselves, but this configuration option is the highest priority and takes precedence. + +### File Entries + +Just like extensions, plugins support the `files` and `urlPath` options for file entry matching. The values specified for these options are used for the default `EntryHandler` instance created by the `scope.handleEntry()` method. As the reference documentation details, similar options can be used to create custom `EntryHandler` instances too. + +The `files` option can be a glob pattern string, an array of glob pattern strings, or a more expressive glob options object.
+ +- The patterns **cannot** contain `..` or start with `/`. +- The pattern `.` or `./` is transformed into `**/*` automatically. +- Often, it is best to omit a leading `.` or `./` in the glob pattern. + +The `urlPath` option is a base URL path that is prepended to the resolved `files` entries. + +- It **cannot** contain `..`. +- If it starts with `./` or is just `.`, the name of the plugin will be automatically prepended to it. + +Putting this all together, to configure the [static](./built-in-extensions#static) built-in extension to serve files from the `web` directory but at the `/static/` path, the `config.yaml` would look like this: + +```yaml +static: + files: 'web/**/*' + urlPath: '/static/' +``` + +Keep in mind the `urlPath` option is completely optional. + +As another example, to configure the [graphqlSchema](./built-in-extensions#graphqlschema) built-in extension to serve only `*.graphql` files from within the top level of the `src/schema` directory, the `config.yaml` would look like this: + +```yaml +graphqlSchema: + files: 'src/schema/*.graphql' +``` + +As detailed, the `files` option also supports a more complex object syntax for advanced use cases. + +For example, to match files within the `web` directory, and omit any within `web/images`, you can use a configuration such as: + +```yaml +static: + files: + source: 'web/**/*' + ignore: 'web/images/**' +``` + +> If you're transitioning from the [extension](./extensions) system, the `files` option object no longer supports an `only` field. Instead, use the `entryEvent.entryType` or the specific `entryEvent.eventType` fields in the [`onEntryEventHandler(entryEvent)`](#function-onentryeventhandlerentryevent-fileentryevent--directoryentryevent-void) method, or any of the specific [`EntryHandler`](#class-entryhandler) events. + +### Timeouts + +The default timeout for all plugins is **30 seconds**. If the `handleApplication()` method does not complete within this time, the component loader will throw an error and unblock the component loading sequence. This is to prevent the component loader from hanging indefinitely if a plugin fails to respond or takes too long to execute. + +The plugin module can export a `defaultTimeout` variable (in milliseconds) that will override the system default. + +For example: + +```typescript +export const defaultTimeout = 60_000; // 60 seconds +``` + +Additionally, users can specify a `timeout` option in their application's `config.yaml` file for a specific plugin. This option takes precedence over the plugin's `defaultTimeout` and the system default. + +For example: + +```yaml +customPlugin: + package: '@harperdb/custom-plugin' + files: 'foo.js' + timeout: 45_000 # 45 seconds +``` + +## Example: Statically hosting files + +This is a functional example of how the `handleApplication()` method and `scope` argument can be used to create a simple static file server plugin. This example assumes that the component has a `config.yaml` with the `files` option set to a glob pattern that matches the files to be served. + +> This is a simplified form of the [static](./built-in-extensions#static) built-in extension.
```js +export function handleApplication(scope) { + const staticFiles = new Map(); + + scope.options.on('change', (key, value, config) => { + if (key[0] === 'files' || key[0] === 'urlPath') { + // If the files or urlPath options change, we need to reinitialize the static files map + staticFiles.clear(); + logger.info(`Static files reinitialized due to change in ${key.join('.')}`); + } + }); + + scope.handleEntry((entry) => { + if (entry.entryType === 'directory') { + logger.info(`Cannot serve directories. Update the files option to only match files.`); + return; + } + + switch (entry.eventType) { + case 'add': + case 'change': + // Store / Update the file contents in memory for serving + staticFiles.set(entry.urlPath, entry.contents); + break; + case 'unlink': + // Remove the file from memory when it is deleted + staticFiles.delete(entry.urlPath); + break; + } + }); + + scope.server.http( + (req, next) => { + if (req.method !== 'GET') return next(req); + + // Attempt to retrieve the requested static file from memory + const staticFile = staticFiles.get(req.pathname); + + return staticFile + ? { + statusCode: 200, + body: staticFile, + } + : { + statusCode: 404, + body: 'File not found', + }; + }, + { runFirst: true } + ); +} +``` + +In this example, the entry handler method passed to `handleEntry` manages the map of static files in memory using their computed `urlPath` and `contents`. If the config file changes (and thus a new `files` or `urlPath` option is specified), the plugin also clears the file map to remove stale entries. Furthermore, it uses the `server.http()` middleware to hook into HTTP request handling. + +This example is heavily simplified, but it demonstrates how the different key parts of `scope` can be used together to provide a performant and reactive application experience. + +## Function: `handleApplication(scope: Scope): void | Promise<void>` + +Parameters: + +- **scope** - [`Scope`](#class-scope) - An instance of the `Scope` class that provides access to the relative application's configuration, resources, and other APIs. + +Returns: `void | Promise<void>` + +This is the only method a plugin module must export. It can be async and is awaited by the component loader. The `scope` argument provides access to the relative application's configuration, resources, and other APIs. + +## Class: `Scope` + +- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter) + +### Event: `'close'` + +Emitted after the scope is closed via the `close()` method. + +### Event: `'error'` + +- **error** - `unknown` - The error that occurred. + +### Event: `'ready'` + +Emitted when the Scope is ready to be used after loading the associated config file. It is awaited by the component loader, so it is not necessary to await it within the `handleApplication()` method. + +### `scope.close()` + +Returns: `this` - The current `Scope` instance. + +Closes all associated entry handlers, closes the associated `scope.options` instance, emits the `'close'` event, and then removes all other listeners on the instance.
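+ +As a minimal sketch (assuming only the documented `'error'` and `'close'` events and the `scope.logger` API described below), a plugin can attach scope-level listeners for observability: + +```js +export function handleApplication(scope) { + // log plugin-scoped errors instead of letting them go unobserved + scope.on('error', (error) => scope.logger.error(`plugin error: ${error}`)); + // note when the scope is closed and its entry handlers are released + scope.on('close', () => scope.logger.info('scope closed')); +} +```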
### `scope.handleEntry([files][, handler])` + +Parameters: + +- **files** - [`FilesOption`](#interface-filesoption) | [`FileAndURLPathConfig`](#interface-fileandurlpathconfig) | [`onEntryEventHandler`](#function-onentryeventhandlerentryevent-fileentryevent--directoryentryevent-void) - _optional_ +- **handler** - [`onEntryEventHandler`](#function-onentryeventhandlerentryevent-fileentryevent--directoryentryevent-void) - _optional_ + +Returns: [`EntryHandler`](#class-entryhandler) - An instance of the `EntryHandler` class that can be used to handle entries within the scope. + +The `handleEntry()` method is the key to handling file system entries specified by a `files` glob pattern option in `config.yaml`. This method is used to register an entry event handler, specifically for the `EntryHandler` [`'all'`](#event-all) event. The method signature is very flexible, and allows for the following variations: + +- `scope.handleEntry()` (with no arguments) Returns the default `EntryHandler` created by the `files` and `urlPath` options in the `config.yaml`. +- `scope.handleEntry(handler)` (where `handler` is an `onEntryEventHandler`) Returns the default `EntryHandler` instance (based on the options within `config.yaml`) and uses the provided `handler` for the [`'all'`](#event-all) event. +- `scope.handleEntry(files)` (where `files` is `FilesOptions` or `FileAndURLPathConfig`) Returns a new `EntryHandler` instance that handles the specified `files` configuration. +- `scope.handleEntry(files, handler)` (where `files` is `FilesOptions` or `FileAndURLPathConfig`, and `handler` is an `onEntryEventHandler`) Returns a new `EntryHandler` instance that handles the specified `files` configuration and uses the provided `handler` for the [`'all'`](#event-all) event. + +For example: + +```js +export function handleApplication(scope) { + // Get the default EntryHandler instance + const defaultEntryHandler = scope.handleEntry(); + + // Assign a handler for the 'all' event on the default EntryHandler + scope.handleEntry((entry) => { + /* ... */ + }); + + // Create a new EntryHandler for the 'src/**/*.js' files option with a custom `'all'` event handler. + const customEntryHandler = scope.handleEntry( + { + files: 'src/**/*.js', + }, + (entry) => { + /* ... */ + } + ); + + // Create another custom EntryHandler for the 'src/**/*.ts' files option, but without an `'all'` event handler. + const anotherCustomEntryHandler = scope.handleEntry({ + files: 'src/**/*.ts', + }); +} +``` + +And thus, if the previous code was used by a component with the following `config.yaml`: + +```yaml +customPlugin: + files: 'web/**/*' +``` + +Then the default `EntryHandler` instance would be created to handle all entries within the `web` directory. + +### `scope.requestRestart()` + +Returns: `void` + +Request a Harper restart. This **does not** restart the instance immediately, but rather indicates to the user that a restart is required. This should be called when the plugin cannot handle the entry event and wants to indicate to the user that the Harper instance should be restarted. + +This method is called automatically by the `scope` instance if the user has not defined a `scope.options.on('change')` handler, or if an event handler exists but is missing a necessary handler method. + +### `scope.resources` + +Returns: `Map` - A map of the currently loaded [Resource](../globals#resource) instances. + +### `scope.server` + +Returns: `server` - A reference to the [server](../globals#server) global API.
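+ +For instance, reusing the `server.http()` middleware signature from the static-hosting example above (the `/healthz` path is just an illustrative choice), a plugin could register a simple health-check handler: + +```js +export function handleApplication(scope) { + scope.server.http((req, next) => { + // answer health checks directly; hand everything else to the next handler + if (req.pathname === '/healthz') return { statusCode: 200, body: 'ok' }; + return next(req); + }); +} +```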
### `scope.options` + +Returns: [`OptionsWatcher`](#class-optionswatcher) - An instance of the `OptionsWatcher` class that provides access to the application's configuration options. Emits `'change'` events when the plugin's part of the component's config file is modified. + +For example, if the plugin `customPlugin` is configured by an application with: + +```yaml +customPlugin: + files: 'foo.js' +``` + +And has the following `handleApplication(scope)` implementation: + +```typescript +export function handleApplication(scope) { + scope.options.on('change', (key, value, config) => { + if (key[0] === 'files') { + // Handle the change in the files option + scope.logger.info(`Files option changed to: ${value}`); + } + }); +} +``` + +Then modifying the `files` option in the `config.yaml` to `bar.js` would log the following: + +```plaintext +Files option changed to: bar.js +``` + +### `scope.logger` + +Returns: `logger` - A scoped instance of the [`logger`](../globals#logger) class that provides logging capabilities for the plugin. + +It is recommended to use this instead of the `logger` global. + +### `scope.name` + +Returns: `string` - The name of the plugin as configured in the `config.yaml` file. This is the key under which the plugin is configured. + +### `scope.directory` + +Returns: `string` - The directory of the application. This is the root directory of the component where the `config.yaml` file is located. + +## Interface: `FilesOption` + +- `string` | `string[]` | [`FilesOptionsObject`](#interface-filesoptionsobject) + +## Interface: `FilesOptionsObject` + +- **source** - `string` | `string[]` - _required_ - The glob pattern string or array of strings. +- **ignore** - `string` | `string[]` - _optional_ - An array of glob patterns to exclude from matches. This is an alternative way to use negative patterns. Defaults to `[]`. + +## Interface: `FileAndURLPathConfig` + +- **files** - [`FilesOption`](#interface-filesoption) - _required_ - A glob pattern string, array of glob pattern strings, or a more expressive glob options object determining the set of files and directories to be resolved for the plugin. +- **urlPath** - `string` - _optional_ - A base URL path to prepend to the resolved `files` entries. + +## Class: `OptionsWatcher` + +- Extends [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter) + +### Event: `'change'` + +- **key** - `string[]` - The key of the changed option split into parts (e.g. `foo.bar` becomes `['foo', 'bar']`). +- **value** - [`ConfigValue`](#interface-configvalue) - The new value of the option. +- **config** - [`ConfigValue`](#interface-configvalue) - The entire configuration object of the plugin. + +The `'change'` event is emitted whenever a configuration option is changed in the configuration file relative to the application and respective plugin. + +Given an application using the following `config.yaml`: + +```yaml +customPlugin: + files: 'web/**/*' +otherPlugin: + files: 'index.js' +``` + +The `scope.options` instances for `customPlugin` and `otherPlugin` would emit `'change'` events when the `files` options relative to them are modified.
+
+For example, if the `files` option for `customPlugin` is changed to `web/**/*.js`, the following event would be emitted _only_ within the `customPlugin` scope:
+
+```js
+scope.options.on('change', (key, value, config) => {
+	key; // ['files']
+	value; // 'web/**/*.js'
+	config; // { files: 'web/**/*.js' }
+});
+```
+
+### Event: `'close'`
+
+Emitted when the `OptionsWatcher` is closed via the `close()` method. The watcher is not usable after this event is emitted.
+
+### Event: `'error'`
+
+- **error** - `unknown` - The error that occurred.
+
+### Event: `'ready'`
+
+- **config** - [`ConfigValue`](#interface-configvalue) | `undefined` - The configuration object of the plugin, if present.
+
+This event can be emitted multiple times. It is first emitted upon the initial load, but will also be emitted after restoring a configuration file or configuration object after a `'remove'` event.
+
+### Event: `'remove'`
+
+The configuration was removed. This can happen if the configuration file was deleted, the configuration object within the file is deleted, or if the configuration file fails to parse. Once restored, the `'ready'` event will be emitted again.
+
+### `options.close()`
+
+Returns: `this` - The current `OptionsWatcher` instance.
+
+Closes the options watcher, removing all listeners and preventing any further events from being emitted. The watcher is not usable after this method is called.
+
+### `options.get(key)`
+
+Parameters:
+
+- **key** - `string[]` - The key of the option to get, split into parts (e.g. `foo.bar` is represented as `['foo', 'bar']`).
+
+Returns: [`ConfigValue`](#interface-configvalue) | `undefined`
+
+If the config is defined, it will attempt to retrieve the value of the option at the specified key. If the key does not exist, it will return `undefined`.
+
+### `options.getAll()`
+
+Returns: [`ConfigValue`](#interface-configvalue) | `undefined`
+
+Returns the entire configuration object for the plugin. If the config is not defined, it will return `undefined`.
+
+### `options.getRoot()`
+
+Returns: [`Config`](#interface-config) | `undefined`
+
+Returns the root configuration object of the application. This is the entire configuration object, essentially the parsed form of the `config.yaml`. If the config is not defined, it will return `undefined`.
+
+### Interface: `Config`
+
+- `[key: string]` [`ConfigValue`](#interface-configvalue)
+
+An object representing the `config.yaml` file configuration.
+
+### Interface: `ConfigValue`
+
+- `string` | `number` | `boolean` | `null` | `undefined` | `ConfigValue[]` | [`Config`](#interface-config)
+
+Any valid configuration value type. Essentially, the primitive types, an array of those types, or an object comprised of values of those types.
+
+## Class: `EntryHandler`
+
+Extends: [`EventEmitter`](https://nodejs.org/docs/latest/api/events.html#class-eventemitter)
+
+Created by calling the [`scope.handleEntry()`](#scopehandleentry) method.
+
+### Event: `'all'`
+
+- **entry** - [`FileEntryEvent`](#interface-fileentryevent) | [`DirectoryEntryEvent`](#interface-directoryentryevent) - The entry that was added, changed, or removed.
+
+The `'all'` event is emitted for all entry events, including file and directory events. This is the event that the handler method in `scope.handleEntry` is registered for. The event handler receives an `entry` object that contains the entry metadata, such as the file contents, URL path, and absolute path.
+
+An effective pattern for this event is:
+
+```js
+async function handleApplication(scope) {
+	scope.handleEntry((entry) => {
+		switch (entry.eventType) {
+			case 'add':
+				// Handle file addition
+				break;
+			case 'change':
+				// Handle file change
+				break;
+			case 'unlink':
+				// Handle file deletion
+				break;
+			case 'addDir':
+				// Handle directory addition
+				break;
+			case 'unlinkDir':
+				// Handle directory deletion
+				break;
+		}
+	});
+}
+```
+
+### Event: `'add'`
+
+- **entry** - [`AddFileEvent`](#interface-addfileevent) - The file entry that was added.
+
+The `'add'` event is emitted when a file is created (or the watcher sees it for the first time). The event handler receives an `AddFileEvent` object that contains the file contents, URL path, absolute path, and other metadata.
+
+### Event: `'addDir'`
+
+- **entry** - [`AddDirEvent`](#interface-adddirevent) - The directory entry that was added.
+
+The `'addDir'` event is emitted when a directory is created (or the watcher sees it for the first time). The event handler receives an `AddDirEvent` object that contains the URL path and absolute path of the directory.
+
+### Event: `'change'`
+
+- **entry** - [`ChangeFileEvent`](#interface-changefileevent) - The file entry that was changed.
+
+The `'change'` event is emitted when a file is modified. The event handler receives a `ChangeFileEvent` object that contains the updated file contents, URL path, absolute path, and other metadata.
+
+### Event: `'close'`
+
+Emitted when the entry handler is closed via the [`entryHandler.close()`](#entryhandlerclose) method.
+
+### Event: `'error'`
+
+- **error** - `unknown` - The error that occurred.
+
+### Event: `'ready'`
+
+Emitted when the entry handler is ready to be used. This event is not automatically awaited by the component loader, nor is awaiting it required; calling `scope.handleEntry()` is perfectly sufficient. It is generally useful if you need to do something _after_ the entry handler is actively watching and handling entries.
+
+### Event: `'unlink'`
+
+- **entry** - [`UnlinkFileEvent`](#interface-unlinkfileevent) - The file entry that was deleted.
+
+The `'unlink'` event is emitted when a file is deleted. The event handler receives an `UnlinkFileEvent` object that contains the URL path and absolute path of the deleted file.
+
+### Event: `'unlinkDir'`
+
+- **entry** - [`UnlinkDirEvent`](#interface-unlinkdirevent) - The directory entry that was deleted.
+
+The `'unlinkDir'` event is emitted when a directory is deleted. The event handler receives an `UnlinkDirEvent` object that contains the URL path and absolute path of the deleted directory.
+
+### `entryHandler.name`
+
+Returns: `string` - The name of the plugin as configured in the `config.yaml` file. This is the key under which the plugin is configured.
+
+### `entryHandler.directory`
+
+Returns: `string`
+
+The directory of the application. This is the root directory of the component where the `config.yaml` file is located.
+
+### `entryHandler.close()`
+
+Returns: `this` - The current `EntryHandler` instance.
+
+Closes the entry handler, removing all listeners and preventing any further events from being emitted. The handler can be started again using the [`entryHandler.update()`](#entryhandlerupdateconfig) method.
+
+### `entryHandler.update(config)`
+
+Parameters:
+
+- **config** - [`FilesOption`](#interface-filesoption) | [`FileAndURLPathConfig`](#interface-fileandurlpathconfig) - The configuration object for the entry handler.
+
+This method will update an existing entry handler to watch new entries. It will close the underlying watcher and create a new one, but will maintain any existing listeners on the `EntryHandler` instance itself.
+
+This method returns a promise associated with the `'ready'` event of the updated handler.
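+
+For example, an entry handler might be retargeted at runtime (a quick sketch; the glob patterns are hypothetical):
+
+```js
+const handler = scope.handleEntry({ files: 'src/**/*.js' });
+// Retarget the same handler later; existing listeners are preserved
+await handler.update({ files: 'lib/**/*.js' });
+// Shut it down when it is no longer needed
+handler.close();
+```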
+
+### Interface: `BaseEntry`
+
+- **stats** - [`fs.Stats`](https://nodejs.org/docs/latest/api/fs.html#class-fsstats) | `undefined` - The file system stats for the entry.
+- **urlPath** - `string` - The recommended URL path of the entry.
+- **absolutePath** - `string` - The absolute path of the entry.
+
+The foundational entry handler event object. The `stats` may or may not be present depending on the event, entry type, and platform.
+
+The `urlPath` is resolved based on the configured pattern (`files:` option) combined with the optional `urlPath` option. This path is generally useful for uniquely representing the entry. It is used in built-in components such as `jsResource` and `static`.
+
+The `absolutePath` is the file system path for the entry.
+
+### Interface: `FileEntry`
+
+Extends [`BaseEntry`](#interface-baseentry)
+
+- **contents** - `Buffer` - The contents of the file.
+
+A specific extension of the `BaseEntry` interface representing a file entry. The contents of the file are read automatically so the user doesn't have to bother with file system operations.
+
+There is no `DirectoryEntry` since there is no other important metadata aside from the `BaseEntry` properties. If a user wants the contents of a directory, they should adjust the pattern to resolve files instead.
+
+### Interface: `EntryEvent`
+
+Extends [`BaseEntry`](#interface-baseentry)
+
+- **eventType** - `string` - The type of entry event.
+- **entryType** - `string` - The type of entry, either a file or a directory.
+
+A general interface representing the entry handler event objects.
+
+### Interface: `AddFileEvent`
+
+Extends [`EntryEvent`](#interface-entryevent), [`FileEntry`](#interface-fileentry)
+
+- **eventType** - `'add'`
+- **entryType** - `'file'`
+
+Event object emitted when a file is created (or the watcher sees it for the first time).
+
+### Interface: `ChangeFileEvent`
+
+Extends [`EntryEvent`](#interface-entryevent), [`FileEntry`](#interface-fileentry)
+
+- **eventType** - `'change'`
+- **entryType** - `'file'`
+
+Event object emitted when a file is modified.
+
+### Interface: `UnlinkFileEvent`
+
+Extends [`EntryEvent`](#interface-entryevent), [`FileEntry`](#interface-fileentry)
+
+- **eventType** - `'unlink'`
+- **entryType** - `'file'`
+
+Event object emitted when a file is deleted.
+
+### Interface: `FileEntryEvent`
+
+- `AddFileEvent` | `ChangeFileEvent` | `UnlinkFileEvent`
+
+A union type representing the file entry events. These events are emitted when a file is created, modified, or deleted. The `FileEntry` interface provides the file contents and other metadata.
+
+### Interface: `AddDirEvent`
+
+Extends [`EntryEvent`](#interface-entryevent)
+
+- **eventType** - `'addDir'`
+- **entryType** - `'directory'`
+
+Event object emitted when a directory is created (or the watcher sees it for the first time).
+
+### Interface: `UnlinkDirEvent`
+
+Extends [`EntryEvent`](#interface-entryevent)
+
+- **eventType** - `'unlinkDir'`
+- **entryType** - `'directory'`
+
+Event object emitted when a directory is deleted.
+
+### Interface: `DirectoryEntryEvent`
+
+- `AddDirEvent` | `UnlinkDirEvent`
+
+A union type representing the directory entry events. There are no change events for directories since they are not modified in the same way as files.
+
+### Function: `onEntryEventHandler(entryEvent: FileEntryEvent | DirectoryEntryEvent): void`
+
+Parameters:
+
+- **entryEvent** - [`FileEntryEvent`](#interface-fileentryevent) | [`DirectoryEntryEvent`](#interface-directoryentryevent)
+
+Returns: `void`
+
+This function is what is passed to the `scope.handleEntry()` method as the handler for the `'all'` event. This is also applicable to a custom `.on('all', handler)` method for any `EntryHandler` instance.
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/content-types.md b/site/versioned_docs/version-4.6/technical-details/reference/content-types.md
new file mode 100644
index 00000000..d7567f7f
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/content-types.md
@@ -0,0 +1,29 @@
+---
+title: Content Types
+---
+
+# Content Types
+
+Harper supports several different content types (or MIME types) for both HTTP request bodies (describing operations) as well as for serializing content into HTTP response bodies. Harper follows HTTP standards for specifying both request body content types and acceptable response body content types. Any of these content types can be used with any of the standard Harper operations.
+
+For request body content, the content type should be specified with the `Content-Type` header. For example, with JSON, use `Content-Type: application/json`, and for CBOR, include `Content-Type: application/cbor`. To request that the response body be encoded with a specific content type, use the `Accept` header. If you want the response to be in JSON, use `Accept: application/json`. If you want the response to be in CBOR, use `Accept: application/cbor`.
+
+The following content types are supported:
+
+## JSON - application/json
+
+JSON is the most widely used content type, and is relatively readable and easy to work with. However, JSON does not support all the data types that are supported by Harper, and can't be used to natively encode data types like binary data or explicit Maps/Sets. Also, JSON is not as efficient as binary formats. When using JSON, compression is recommended (this also follows standard HTTP protocol with the `Accept-Encoding` header) to improve network transfer performance, although there is some server performance overhead. JSON is a good choice for web development when standard JSON types are sufficient, particularly when combined with compression, and when debuggability/observability is important.
+
+## CBOR - application/cbor
+
+CBOR is a highly efficient binary format, and is a recommended format for most production use cases with Harper. CBOR supports the full range of Harper data types, including binary data, typed dates, and explicit Maps/Sets. CBOR is very performant and space efficient even without compression. Compression will still yield better network transfer size/performance, but compressed CBOR is generally not any smaller than compressed JSON. CBOR also natively supports streaming for optimal performance (using indefinite length arrays). The CBOR format has excellent standardization, and Harper's CBOR support provides an excellent balance of performance and size efficiency.
+
+## MessagePack - application/x-msgpack
+
+MessagePack is another efficient binary format like CBOR, with support for all Harper data types. MessagePack generally has wider adoption than CBOR and can be useful in systems that don't have CBOR support (or good support). However, MessagePack does not have native support for streaming arrays of data (for query results), and so query results are returned as a (concatenated) sequence of MessagePack objects/maps. MessagePack decoders used with Harper's MessagePack must be prepared to decode a direct sequence of MessagePack values to properly read responses.
+
+## Comma-separated Values (CSV) - text/csv
+
+Comma-separated values (CSV) is an easy-to-use and easy-to-understand format that can be readily imported into spreadsheets or used for data processing. CSV lacks hierarchical structure for most data types, and shouldn't be used for frequent/production use, but when you need it, it is available.
+
+In addition, with the REST interface, you can use file-style extensions to indicate an encoding, like http://host/path.csv to indicate CSV encoding. See the [REST documentation](../../developers/rest) for more information on how to do this.
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/data-types.md b/site/versioned_docs/version-4.6/technical-details/reference/data-types.md
new file mode 100644
index 00000000..0262d25f
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/data-types.md
@@ -0,0 +1,60 @@
+---
+title: Data Types
+---
+
+# Data Types
+
+Harper supports a rich set of data types for use in records in databases. Various data types can be used from both direct JavaScript interfaces in Custom Functions and the HTTP operations APIs. Using JSON for communication naturally limits the data types to those available in JSON (Harper supports all JSON data types), but JavaScript code and alternate data formats facilitate the use of additional data types. Harper supports MessagePack and CBOR, which allow for all of Harper's supported data types. [Schema definitions can specify the expected types for fields, with GraphQL Schema Types](../../developers/applications/defining-schemas), which are used for validation of incoming typed data (JSON, MessagePack) and for auto-conversion of untyped data (CSV, [query parameters](../../developers/rest)). Available data types include:
+
+(Note that these labels are descriptive; they do not necessarily correspond to the GraphQL schema type names, but the schema type names are noted where possible.)
+
+## Boolean
+
+true or false. The GraphQL schema type name is `Boolean`.
+
+## String
+
+Strings, or text, are a sequence of any unicode characters and are internally encoded with UTF-8. The GraphQL schema type name is `String`.
+
+## Number
+
+Numbers can be stored as signed integers with up to 1000 bits of precision (about 300 digits) or as floating point with 64-bit floating point precision, and numbers are automatically stored using the most optimal type. With JSON, numbers are automatically parsed and stored in the most appropriate format. Custom components and applications may use BigInt numbers to store/access integers that are larger than 53 bits. The following GraphQL schema type names are supported:
+
+- `Float` - Any number that can be represented with a [64-bit double precision floating point number](https://en.wikipedia.org/wiki/Double-precision_floating-point_format) ("double")
+- `Int` - Any integer from -2147483648 to 2147483647
+- `Long` - Any integer from -9007199254740992 to 9007199254740992
+- `BigInt` - Any integer (negative or positive) with less than 300 digits
+
+Note that `BigInt` is a distinct and separate type from standard numbers in JavaScript, so custom code should handle this type appropriately.
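+
+For example, the distinction looks like this in JavaScript (a quick sketch; the field names are hypothetical):
+
+```javascript
+const record = {
+	rating: 4.5, // a Float (64-bit double)
+	views: 2147483647, // fits in an Int
+	totalBytes: 9007199254740992, // requires a Long
+	serial: 123456789012345678901234567890n, // a BigInt literal, distinct from Number
+};
+
+typeof record.views; // 'number'
+typeof record.serial; // 'bigint'
+record.serial === BigInt('123456789012345678901234567890'); // true
+```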
+
+## Object/Map
+
+Objects, or maps, that hold a set of named properties can be stored in Harper. When provided as JSON objects or JavaScript objects, all property keys are stored as strings. The order of properties is also preserved in Harper's storage. Duplicate property keys are not allowed (they are dropped in parsing any incoming data).
+
+## Array
+
+Arrays hold an ordered sequence of values and can be stored in Harper. There is no support for sparse arrays, although you can use objects to store data with numbers (converted to strings) as properties.
+
+## Null
+
+A null value can be stored in Harper property values as well.
+
+## Date
+
+Dates can be stored as a specific data type. This is not supported in JSON, but is supported by MessagePack and CBOR. Custom Functions can also store and use Dates using JavaScript Date instances. The GraphQL schema type name is `Date`.
+
+## Binary Data
+
+Binary data can be stored in property values as well, with two different data types that are available:
+
+### Bytes
+
+JSON doesn't have any support for encoding binary data, but MessagePack and CBOR support binary data in data structures, and this will be preserved in Harper. Custom Functions can also store binary data by using Node.js's Buffer or Uint8Array instances to hold the binary data. The GraphQL schema type name is `Bytes`.
+
+### Blobs
+
+Binary data can also be stored with [`Blob`s](./blob), which can scale much better for larger content than `Bytes`, as it is designed to be streamed and does not need to be held entirely in memory. It is recommended that `Blob`s are used for content larger than 20KB.
+
+## Explicit Map/Set
+
+Explicit instances of JavaScript Maps and Sets can be stored and preserved in Harper as well. This can't be represented with JSON, but can be with CBOR.
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/dynamic-schema.md b/site/versioned_docs/version-4.6/technical-details/reference/dynamic-schema.md
new file mode 100644
index 00000000..2ea72c1e
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/dynamic-schema.md
@@ -0,0 +1,148 @@
+---
+title: Dynamic Schema
+---
+
+# Dynamic Schema
+
+When tables are created without any schema, through the operations API (without specifying attributes) or the studio, the tables follow "dynamic schema" behavior. Generally it is best practice to define schemas for your tables to ensure predictable, consistent structures with data integrity and precise control over indexing, without dependency on the data itself. However, it can often be simpler and quicker to create a table and let the data auto-generate the schema dynamically, with everything being auto-indexed for broad querying.
+
+With dynamic schemas, individual attributes are reflexively created as data is ingested, meaning the table will adapt to the structure of the data ingested. Harper tracks the metadata around schemas, tables, and attributes, allowing for `describe_table`, `describe_schema`, and `describe_all` operations.
+
+### Databases
+
+Harper databases hold a collection of tables together in a single file that are transactionally connected. This means that operations across tables within a database can be performed in a single atomic transaction. By default, tables are added to the default database called "data", but other databases can be created and specified for tables.
+
+### Tables
+
+Harper tables group records together with a common data pattern. To create a table, users must provide a table name and a primary key.
+
+- **Table Name**: Used to identify the table.
+- **Primary Key**: This is a required attribute that serves as the unique identifier for a record and is also known as the `hash_attribute` in the Harper operations API.
+
+## Primary Key
+
+The primary key (also referred to as the `hash_attribute`) is used to uniquely identify records. Uniqueness is enforced on the primary key; inserts with the same primary key will be rejected. If a primary key is not provided on insert, a GUID will be automatically generated and returned to the user. The [Harper Storage Algorithm](./storage-algorithm) utilizes this value for indexing.
+
+**Standard Attributes**
+
+With tables that are using dynamic schemas, additional attributes are reflexively added via insert and update operations (in both SQL and NoSQL) when new attributes are included in the data structure provided to Harper. As a result, schemas are additive, meaning new attributes are created in the underlying storage algorithm as additional data structures are provided. Harper offers `create_attribute` and `drop_attribute` operations for users who prefer to manually define their data model independent of data ingestion. When new attributes are added to tables with existing data, the value of that new attribute will be assumed `null` for all existing records.
+
+**Audit Attributes**
+
+Harper automatically creates two audit attributes on each record if the table is created without a schema.
+
+- `__createdtime__`: The time the record was created, in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+- `__updatedtime__`: The time the record was updated, in [Unix Epoch with milliseconds](https://www.epochconverter.com/) format.
+
+### Dynamic Schema Example
+
+To better understand the behavior, let's take a look at an example. This example utilizes [Harper API operations](../../developers/operations-api/databases-and-tables).
+
+**Create a Database**
+
+```json
+{
+	"operation": "create_database",
+	"database": "dev"
+}
+```
+
+**Create a Table**
+
+Notice the database name, table name, and primary key name are the only required parameters.
+
+```json
+{
+	"operation": "create_table",
+	"database": "dev",
+	"table": "dog",
+	"primary_key": "id"
+}
+```
+
+At this point the table does not have structure beyond what we provided, so the table looks like this:
+
+**dev.dog**
+
+![](/img/v4.6/reference/dynamic_schema_2_create_table.png.webp)
+
+**Insert Record**
+
+To define attributes we do not need to do anything beyond sending them in with an insert operation.
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [{ "id": 1, "dog_name": "Penny", "owner_name": "Kyle" }]
+}
+```
+
+With a single record inserted and new attributes defined, our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.6/reference/dynamic_schema_3_insert_record.png.webp)
+
+Indexes have been automatically created for the `dog_name` and `owner_name` attributes.
+
+**Insert Additional Record**
+
+If we continue inserting records with the same data schema, no schema updates are required. One record will omit the hash attribute from the insert to demonstrate GUID generation.
+
+```json
+{
+	"operation": "insert",
+	"database": "dev",
+	"table": "dog",
+	"records": [
+		{ "id": 2, "dog_name": "Monk", "owner_name": "Aron" },
+		{ "dog_name": "Harper", "owner_name": "Stephen" }
+	]
+}
+```
+
+In this case, there is no change to the schema. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.6/reference/dynamic_schema_4_insert_additional_record.png.webp)
+
+**Update Existing Record**
+
+In this case, we will update a record with a new attribute not previously defined on the table.
+
+```json
+{
+	"operation": "update",
+	"database": "dev",
+	"table": "dog",
+	"records": [{ "id": 2, "weight_lbs": 35 }]
+}
+```
+
+Now we have a new attribute called `weight_lbs`. Our table now looks like this:
+
+**dev.dog**
+
+![](/img/v4.6/reference/dynamic_schema_5_update_existing_record.png.webp)
+
+**Query Table with SQL**
+
+Now if we query for all records where `weight_lbs` is `null`, we expect to get back two records.
+
+```json
+{
+	"operation": "sql",
+	"sql": "SELECT * FROM dev.dog WHERE weight_lbs IS NULL"
+}
+```
+
+This results in the expected two records being returned.
+
+![](/img/v4.6/reference/dynamic_schema_6_query_table_with_sql.png.webp)
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/globals.md b/site/versioned_docs/version-4.6/technical-details/reference/globals.md
new file mode 100644
index 00000000..70763cde
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/globals.md
@@ -0,0 +1,327 @@
+---
+title: Globals
+---
+
+# Globals
+
+The primary way that JavaScript code can interact with Harper is through global variables, which provide several objects and classes that give access to the tables, server hooks, and resources that Harper provides for building applications. As global variables, these can be directly accessed in any module.
+
+These global variables are also available through the `harperdb` module/package, which can provide better typing in TypeScript. To use this with your own directory, make sure you link the package to your current `harperdb` installation:
+
+```bash
+npm link harperdb
+```
+
+The `harperdb` package is automatically linked for all installed components. Once linked, if you are using ECMAScript module syntax, you can import functions from `harperdb` like:
+
+```javascript
+import { tables, Resource } from 'harperdb';
+```
+
+Or if you are using CommonJS format for your modules:
+
+```javascript
+const { tables, Resource } = require('harperdb');
+```
+
+The global variables include:
+
+## `tables`
+
+This is an object that holds all the tables for the default database (called `data`) as properties. Each of these property values is a table class that subclasses the Resource interface and provides access to the table through the Resource interface. For example, you can get a record from a table (in the default database) called 'my-table' with:
+
+```javascript
+import { tables } from 'harperdb';
+const { MyTable } = tables;
+async function getRecord() {
+	let record = await MyTable.get(recordId);
+}
+```
+
+It is recommended that you [define a database](../../developers/applications/defining-schemas) for all the tables that are required to exist in your application. This will ensure that the tables exist on the `tables` object. Also note that the property names follow a CamelCase convention for use in JavaScript and in the GraphQL schemas, but these are translated to snake_case for the actual table names, and converted back to CamelCase when added to the `tables` object.
+
+## `databases`
+
+This is an object that holds all the databases in Harper, and can be used to explicitly access a table by database name. Each database is a property on this object, and each of these property values is an object with the set of all tables in that database. The default database, `databases.data`, should equal the `tables` export. For example, if you want to access the "dog" table in the "dev" database, you could do so with:
+
+```javascript
+import { databases } from 'harperdb';
+const { Dog } = databases.dev;
+```
+
+## `Resource`
+
+This is the base class for all resources, including tables and external data sources. This is provided so that you can extend it to implement custom data source providers. See the [Resource API documentation](./resources/) for more details about implementing a Resource class.
+
+## `auth(username, password?): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. If a password is provided, the password will be verified before returning the user object (if the password is incorrect, an error will be thrown).
+
+## `logger`
+
+This provides methods `trace`, `debug`, `info`, `warn`, `error`, `fatal`, and `notify` for logging. See the [logging documentation](../../administration/logging/standard-logging) for more information.
+
+## `server`
+
+The `server` global object provides a number of functions and objects to interact with Harper's HTTP, networking, and authentication services.
+
+### `server.http(listener: RequestListener, options: HttpOptions): HttpServer[]`
+
+Alias: `server.request`
+
+Add a handler method to the HTTP server request listener middleware chain.
+
+Returns an array of server instances based on the specified `options.port` and `options.securePort`.
+
+Example:
+
+```js
+server.http(
+	(request, next) => {
+		return request.url === '/graphql' ? handleGraphQLRequest(request) : next(request);
+	},
+	{
+		runFirst: true, // run this handler first
+	}
+);
+```
+
+#### `RequestListener`
+
+Type: `(request: Request, next: RequestListener) => Promise`
+
+The HTTP request listener to be added to the middleware chain. To continue chain execution, pass the `request` to the `next` function, such as `return next(request);`.
+
+### `Request` and `Response`
+
+The `Request` and `Response` classes are based on the WHATWG APIs for the [`Request`](https://developer.mozilla.org/en-US/docs/Web/API/Request) and [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) classes. Requests and responses are based on these standards-based APIs to facilitate reuse with modern web code. While Node.js' HTTP APIs are powerful low-level APIs, the `Request`/`Response` APIs provide excellent composability characteristics, well suited for layered middleware and for clean mapping to [RESTful method handlers](./resources/) with promise-based responses, as well as interoperability with other standards-based APIs like [streams](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) used with [`Blob`s](https://developer.mozilla.org/en-US/docs/Web/API/Blob). However, the Harper implementation of these classes is not a direct implementation of the WHATWG APIs, but implements additional/distinct properties for the Harper server environment:
+
+#### `Request`
+
+A `Request` object is passed to the direct static REST handlers, and preserved as the context for instance methods, and has the following properties:
+
+- `url` - This is the request target, which is the portion of the URL that was received by the server. If a client sends a request to `http://example.com:8080/path?query=string`, the actual received request is `GET /path?query=string` and the `url` property will be `/path?query=string`.
+- `method` - This is the HTTP method of the request. This is a string like `GET`, `POST`, `PUT`, `DELETE`, etc.
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the request.
+- `pathname` - This is the path portion of the URL, without the query string. For example, if the URL is `/path?query=string`, the `pathname` will be `/path`.
+- `protocol` - This is the protocol of the request, like `http` or `https`.
+- `data` - This is the deserialized body of the request (based on the type of data specified by the `Content-Type` header).
+- `ip` - This is the remote IP address of the client that made the request (or the remote IP address of the last proxy to connect to Harper).
+- `host` - This is the host of the request, like `example.com`.
+- `sendEarlyHints(link: string, headers?: object): void` - This method sends an early hints response to the client, prior to actually returning a response. This is useful for sending a link header to the client to indicate that another resource should be preloaded. The `headers` argument can be used to send additional headers with the early hints response, in addition to the `link`. This is generally most helpful in a cache resolution function, where you can send hints _if_ the data is not in the cache and is being resolved from an origin:
+
+```javascript
+class Origin {
+	async get(request) {
+		// if we are fetching data from origin, send early hints
+		this.getContext().requestContext.sendEarlyHints('');
+		let response = await fetch(request);
+		...
+	}
+}
+Cache.sourcedFrom(Origin);
+```
+
+- `login(username, password): Promise` - This method can be called to start an authenticated session. The login will authenticate the user by username and password. If the authentication is successful, a session will be created and a cookie will be set on the response header that references the session. All subsequent requests from the client that send the cookie will be authenticated as the user that logged in, and the session record will be attached to the request. This method returns a promise that resolves when the login is successful, and rejects if the login is unsuccessful.
+- `session` - This is the session object that is associated with the current cookie-maintained session. This object is used to store session data for the current session. This is a `Table` record instance, and can be updated by calling `request.session.update({ key: value })`; the session can be retrieved with `request.session.get()`. If the cookie has not been set yet, a cookie will be set the first time a session is updated or a login occurs.
+- `_nodeRequest` - This is the underlying Node.js [`http.IncomingMessage`](https://nodejs.org/api/http.html#http_class_http_incomingmessage) object. This can be used to access the raw request data, such as the raw headers, raw body, etc. However, this is discouraged and should be used with caution since it will likely break any other server handlers that depend on the layered `Request` call with `Response` return pattern.
+- `_nodeResponse` - This is the underlying Node.js [`http.ServerResponse`](https://nodejs.org/api/http.html#http_class_http_serverresponse) object. This can be used to access the raw response data, such as the raw headers. Again, this is discouraged and can cause problems for middleware; it should only be used if you are certain that other server handlers will not attempt to return a different `Response` object.
+
+#### `Response`
+
+REST methods can directly return data that is serialized and returned to users, or they can return a `Response` object (or a promise to a `Response`), or a `Response`-like object with the following properties (or again, a promise to it):
+
+- `status` - This is the HTTP status code of the response. This is a number like `200`, `404`, `500`, etc.
+- `headers` - This is a [`Headers`](https://developer.mozilla.org/en-US/docs/Web/API/Headers) object that contains the headers of the response.
+- `data` - This is the data to be returned in the response. This will be serialized using Harper's content negotiation.
+- `body` - Alternately (to `data`), the raw body can be returned as a `Buffer`, string, stream (Node.js or [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream)), or a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
+
+#### `HttpOptions`
+
+Type: `Object`
+
+Properties:
+
+- `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+- `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+- `securePort` - _optional_ - `number` - Specify which HTTPS server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+#### `HttpServer`
+
+Node.js [`http.Server`](https://nodejs.org/api/http.html#class-httpserver) or [`https.Server`](https://nodejs.org/api/https.html#class-httpsserver) instance.
+
+### `server.socket(listener: ConnectionListener, options: SocketOptions): SocketServer`
+
+Creates a socket server on the specified `options.port` or `options.securePort`.
+
+Only one socket server will be created. A `securePort` takes precedence.
+
+#### `ConnectionListener`
+
+Node.js socket server connection listener as documented in [`net.createServer`](https://nodejs.org/api/net.html#netcreateserveroptions-connectionlistener) or [`tls.createServer`](https://nodejs.org/api/tls.html#tlscreateserveroptions-secureconnectionlistener)
+
+#### `SocketOptions`
+
+- `port` - _optional_ - `number` - Specify the port for the [`net.Server`](https://nodejs.org/api/net.html#class-netserver) instance.
+- `securePort` - _optional_ - `number` - Specify the port for the [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+#### `SocketServer`
+
+Node.js [`net.Server`](https://nodejs.org/api/net.html#class-netserver) or [`tls.Server`](https://nodejs.org/api/tls.html#class-tlsserver) instance.
+
+### `server.ws(listener: WsListener, options: WsOptions): HttpServer[]`
+
+Add a listener to the WebSocket connection listener middleware chain. The WebSocket server is associated with the HTTP server specified by the `options.port` or `options.securePort`. Use the [`server.upgrade()`](./globals#serverupgradelistener-upgradelistener-options-upgradeoptions-void) method to add a listener to the upgrade middleware chain.
+
+Example:
+
+```js
+server.ws((ws, request, chainCompletion) => {
+	chainCompletion.then(() => {
+		ws.on('error', console.error);
+
+		ws.on('message', function message(data) {
+			console.log('received: %s', data);
+		});
+
+		ws.send('something');
+	});
+});
+```
+
+#### `WsListener`
+
+Type: `(ws: WebSocket, request: Request, chainCompletion: ChainCompletion, next: WsListener) => Promise`
+
+The WebSocket connection listener.
+
+- The `ws` argument is the [WebSocket](https://github.com/websockets/ws/blob/master/doc/ws.md#class-websocket) instance as defined by the `ws` module.
+- The `request` argument is Harper's transformation of the `IncomingMessage` argument of the standard ['connection'](https://github.com/websockets/ws/blob/master/doc/ws.md#event-connection) listener event for a WebSocket server.
+- The `chainCompletion` argument is a `Promise` of the associated HTTP server's request chain. Awaiting this promise enables the user to ensure the HTTP request has finished being processed before operating on the WebSocket.
+- The `next` argument is similar to that of other `next` arguments in Harper's server middlewares. To continue execution of the WebSocket connection listener middleware chain, pass all of the other arguments to this one, such as: `next(ws, request, chainCompletion)`
+
+#### `WsOptions`
+
+Type: `Object`
+
+Properties:
+
+- `maxPayload` - _optional_ - `number` - Set the max payload size for the WebSocket server. Defaults to 100 MB.
+- `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+- `port` - _optional_ - `number` - Specify which WebSocket server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+- `securePort` - _optional_ - `number` - Specify which WebSocket secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.upgrade(listener: UpgradeListener, options: UpgradeOptions): void`
+
+Add a listener to the HTTP server [upgrade](https://nodejs.org/api/http.html#event-upgrade_1) event. If a WebSocket connection listener is added using [`server.ws()`](./globals#serverwslistener-wslistener-options-wsoptions-httpserver), a default upgrade handler will be added as well. The default upgrade handler will add a `__harperdb_request_upgraded` boolean to the `request` argument to signal the connection has already been upgraded. It will also check for this boolean _before_ upgrading, and if it is `true`, it will pass the arguments along to the `next` listener.
+
+This method should be used to delegate HTTP upgrade events to an external WebSocket server instance.
+
+Example:
+
+> This example is from the Harper Next.js component. See the complete source code [here](https://github.com/HarperDB/nextjs/blob/main/extension.js)
+
+```js
+server.upgrade(
+	(request, socket, head, next) => {
+		if (request.url === '/_next/webpack-hmr') {
+			return upgradeHandler(request, socket, head).then(() => {
+				request.__harperdb_request_upgraded = true;
+
+				next(request, socket, head);
+			});
+		}
+
+		return next(request, socket, head);
+	},
+	{ runFirst: true }
+);
+```
+
+#### `UpgradeListener`
+
+Type: `(request, socket, head, next) => void`
+
+The arguments are passed to the middleware chain from the HTTP server [`'upgrade'`](https://nodejs.org/api/http.html#event-upgrade_1) event.
+
+#### `UpgradeOptions`
+
+Type: `Object`
+
+Properties:
+
+- `runFirst` - _optional_ - `boolean` - Add the listener to the front of the middleware chain. Defaults to `false`
+- `port` - _optional_ - `number` - Specify which HTTP server middleware chain to add the listener to. Defaults to the Harper system default HTTP port configured by `harperdb-config.yaml`, generally `9926`
+- `securePort` - _optional_ - `number` - Specify which HTTP secure server middleware chain to add the listener to. Defaults to the Harper system default HTTP secure port configured by `harperdb-config.yaml`, generally `9927`
+
+### `server.config`
+
+This provides access to the Harper configuration object. This comes from the [harperdb-config.yaml](../../deployments/configuration) file (parsed into object form).
+
+### `server.recordAnalytics(value, metric, path?, method?, type?)`
+
+This records the provided value as a metric in Harper's analytics. Harper efficiently records and tracks these metrics and makes them available through the [analytics API](./analytics). The values are aggregated, and statistical information is computed when many operations are performed. The optional parameters can be used to group statistics; make sure you are not grouping at too fine a level for useful aggregation. The parameters are:
+
+- `value` - This is a numeric value for the metric that is being recorded. This can be a value measuring time or bytes, for example.
+- `metric` - This is the name of the metric.
+- `path` - This is an optional path (like a URL path). For a URL like /my-resource/, you would typically include a path of "my-resource", not including the id, so you can group all the requests to "my-resource" instead of aggregating by each individual id.
+- `method` - Optional method to group by.
+- `type` - Optional type to group by.
+
+### `server.getUser(username): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. This does not verify the password, so it is generally used for looking up users by username. If you want to verify a user by password, use [`server.authenticateUser`](./globals#serverauthenticateuserusername-password-user).
+
+### `server.authenticateUser(username, password): Promise`
+
+This returns the user object with permissions/authorization information based on the provided username. The password will be verified before returning the user object (if the password is incorrect, an error will be thrown).
+
+### `server.resources: Resources`
+
+This provides access to the map of all registered resources. This is the central registry in Harper for registering any resources to be exported for use by REST, MQTT, or other components. Components that want to register resources should use the `server.resources.set(name, resource)` method to add to this map. Exported resources can be found by passing a path to `server.resources.getMatch(path)`, which will find any resource that matches the path or the beginning of the path.
+
+#### `server.resources.set(name, resource, exportTypes?)`
+
+Register a resource with the server. For example:
+
+```js
+class NewResource extends Resource {
+}
+server.resources.set('NewResource', NewResource);
+// or limit usage:
+server.resources.set('NewResource', NewResource, { rest: true, mqtt: false, 'my-protocol': true });
+```
+
+#### `server.resources.getMatch(path, exportType?)`
+
+Find a resource that matches the path. For example:
+
+```js
+server.resources.getMatch('/NewResource/some-id');
+// or specify the export/protocol type, to allow it to be limited:
+server.resources.getMatch('/NewResource/some-id', 'my-protocol');
+```
+
+### `server.operation(operation: Object, context?: Object, authorize?: boolean)`
+
+Execute an operation from the [Operations API](https://docs.harperdb.io/developers/operations-api)
+
+Parameters:
+
+- **operation** - `Object` - Object matching the desired operation's request body
+- **context** - `Object` - `{ username: string }` - _optional_ - The specified user
+- **authorize** - `boolean` - _optional_ - Indicates whether the operation should authorize the user. Defaults to `false`
+
+Returns a `Promise` with the operation's response as per the [Operations API documentation](https://docs.harperdb.io/developers/operations-api).
+
+### `server.nodes`
+
+Returns an array of node objects registered in the cluster
+
+### `server.shards`
+
+Returns a map of shard number to an array of its associated nodes
+
+### `server.hostname`
+
+Returns the hostname of the current node
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/graphql.md b/site/versioned_docs/version-4.6/technical-details/reference/graphql.md
new file mode 100644
index 00000000..edcc723b
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/graphql.md
@@ -0,0 +1,254 @@
+---
+title: GraphQL Querying
+---
+
+# GraphQL Querying
+
+Harper supports GraphQL in a variety of ways. It can be used for [defining schemas](../../developers/applications/defining-schemas), and for querying [Resources](./resources/).
+
+Get started by setting `graphql: true` in `config.yaml`.
+
+This automatically enables a `/graphql` endpoint that can be used for GraphQL queries.
+
+> Harper's GraphQL component is inspired by the [GraphQL Over HTTP](https://graphql.github.io/graphql-over-http/draft/#) specification; however, it fully implements neither that specification nor the [GraphQL](https://spec.graphql.org/) specification.
+
+Queries can be either `GET` or `POST` requests, and both follow essentially the same request format. `GET` requests must use search parameters, and `POST` requests use the request body.
+
+For example, to request the GraphQL query:
+
+```graphql
+query GetDogs {
+	Dog {
+		id
+		name
+	}
+}
+```
+
+The `GET` request would look like:
+
+```http
+GET /graphql?query=query+GetDogs+%7B+Dog+%7B+id+name+%7D+%7D
+Accept: application/graphql-response+json
+```
+
+And the `POST` request would look like:
+
+```http
+POST /graphql/
+Content-Type: application/json
+Accept: application/graphql-response+json
+
+{
+	"query": "query GetDogs { Dog { id name } }"
+}
+```
+
+> Tip: For the best user experience, include the `Accept: application/graphql-response+json` header in your request. This provides better status codes for errors.
+
+The Harper GraphQL querying system is strictly limited to exported Harper Resources. For many users, this will typically be a table that uses the `@export` directive in its schema. Queries can only specify Harper Resources and their attributes in the selection set. Queries can filter using [arguments](https://graphql.org/learn/queries/#arguments) on the top-level Resource field. Harper provides a short form pattern for simple queries, and a long form pattern based on the [Resource Query API](./resources/#query) for more complex queries.
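+
+For instance, a table exported for GraphQL querying might be defined in a schema like this (a minimal sketch; the table and attribute names are hypothetical):
+
+```graphql
+type Dog @table @export {
+	id: ID @primaryKey
+	name: String
+	breed: String
+}
+```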
+
+Unlike REST queries, GraphQL queries can specify multiple resources simultaneously:
+
+```graphql
+query GetDogsAndOwners {
+	Dog {
+		id
+		name
+		breed
+	}
+
+	Owner {
+		id
+		name
+		occupation
+	}
+}
+```
+
+This will return all dogs and owners in the database, and is equivalent to executing two REST queries:
+
+```http
+GET /Dog/?select(id,name,breed)
+# and
+GET /Owner/?select(id,name,occupation)
+```
+
+### Request Parameters
+
+There are three request parameters for GraphQL queries: `query`, `operationName`, and `variables`
+
+1. `query` - _Required_ - The string representation of the GraphQL document.
+   1. Limited to [Executable Definitions](https://spec.graphql.org/October2021/#executabledefinition) only.
+      1. i.e. GraphQL [`query`](https://graphql.org/learn/queries/#fields) or `mutation` (coming soon) operations, and [fragments](https://graphql.org/learn/queries/#fragments).
+   1. If a shorthand, unnamed, or single named query is provided, it will be executed by default. Otherwise, if there are multiple queries, the `operationName` parameter must be used.
+1. `operationName` - _Optional_ - The name of the query operation to execute if multiple queries are provided in the `query` parameter
+1. `variables` - _Optional_ - A map of variable values to be used for the specified query
+
+### Type Checking
+
+The Harper GraphQL querying system takes many liberties with the GraphQL specification. This extends to how it handles type checking. In general, the querying system does **not** type check. Harper uses the `graphql` parser directly, and then performs a transformation on the resulting AST. We do not control any type checking/casting behavior of the parser, and since the execution step diverges from the spec greatly, the type checking behavior is only loosely defined.
+
+In variable definitions, the querying system will ensure non-null values exist (and error appropriately), but it will not do any type checking of the value itself.
+
+For example, the variable `$name: String!` states that `name` should be a non-null, string value.
+
+- If the request does not contain the `name` variable, an error will be returned
+- If the request provides `null` for the `name` variable, an error will be returned
+- If the request provides any non-string value for the `name` variable, i.e. `1`, `true`, `{ foo: "bar" }`, the behavior is undefined and an error may or may not be returned.
+- If the variable definition is changed to include a default value, `$name: String! = "John"`, then when omitted, `"John"` will be used.
+  - If `null` is provided as the variable value, an error will still be returned.
+  - If the default value does not match the type specified (i.e. `$name: String! = 0`), this is also considered undefined behavior. It may or may not fail in a variety of ways.
+- Fragments will generally extend non-specified types, and the querying system will do no validity checking on them. For example, `fragment Fields on Any { ... }` is just as valid as `fragment Fields on MadeUpTypeName { ... }`. See the Fragments section for more details.
+
+The only notable place the querying system will do some level of type analysis is the transformation of arguments into a query.
+
+- Objects will be transformed into properly nested attributes
+- String and Boolean values are passed through as their AST values
+- Float and Int values will be parsed using the JavaScript `parseFloat` and `parseInt` methods respectively.
+- Lists and Enums are not supported.
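+
+To make the variable rules above concrete, here is a sketch of a request using a non-null variable with a default value (the resource and values are hypothetical):
+
+```graphql
+query GetDog($name: String! = "John") {
+	Dog(name: $name) {
+		id
+	}
+}
+```
+
+Sending this query with `"variables": {}` would search for dogs named `"John"`, while `"variables": { "name": null }` would return an error.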
+
+### Fragments
+
+The querying system loosely supports fragments. Both fragment definitions and inline fragments are supported, and are entirely a composition utility. Since this system does very little type checking, the `on Type` part of fragments is entirely pointless. Any value can be used for `Type` and it will have the same effect.
+
+For example, in the query
+
+```graphql
+query Get {
+	Dog {
+		...DogFields
+	}
+}
+
+fragment DogFields on Dog {
+	name
+	breed
+}
+```
+
+The `Dog` type in the fragment has no correlation to the `Dog` resource in the query (which correlates to the Harper `Dog` resource).
+
+You can specify literally anything in the fragment and it will behave the same way:
+
+```graphql
+fragment DogFields on Any { ... } # this is recommended
+fragment DogFields on Cat { ... }
+fragment DogFields on Animal { ... }
+fragment DogFields on LiterallyAnything { ... }
+```
+
+As an actual example, fragments should be used for composition:
+
+```graphql
+query Get {
+	Dog {
+		...sharedFields
+		breed
+	}
+	Owner {
+		...sharedFields
+		occupation
+	}
+}
+
+fragment sharedFields on Any {
+	id
+	name
+}
+```
+
+### Short Form Querying
+
+Any attribute can be used as an argument for a query. In this short form, multiple arguments are treated as multiple equivalency conditions with the default `and` operation.
+
+For example, the following query requires an `id` variable to be provided, and the system will search for a `Dog` record matching that id.
+
+```graphql
+query GetDog($id: ID!) {
+	Dog(id: $id) {
+		name
+		breed
+		owner {
+			name
+		}
+	}
+}
+```
+
+And as a properly formed request:
+
+```http
+POST /graphql/
+Content-Type: application/json
+Accept: application/graphql-response+json
+
+{
+	"query": "query GetDog($id: ID!) { Dog(id: $id) { name breed owner { name } } }",
+	"variables": {
+		"id": "0"
+	}
+}
+```
+
+The REST equivalent would be:
+
+```http
+GET /Dog/?id==0&select(name,breed,owner{name})
+# or
+GET /Dog/0?select(name,breed,owner{name})
+```
+
+Short form queries can handle nested attributes as well.
+
+For example, return all dogs who have an owner with the name `"John"`:
+
+```graphql
+query GetDog {
+	Dog(owner: { name: "John" }) {
+		name
+		breed
+		owner {
+			name
+		}
+	}
+}
+```
+
+Would be equivalent to:
+
+```http
+GET /Dog/?owner.name==John&select(name,breed,owner{name})
+```
+
+And finally, we can put all of these together to create semi-complex, equality-based queries!
+
+The following query has two variables and will return all dogs who have the specified name as well as the specified owner name.
+
+```graphql
+query GetDog($dogName: String!, $ownerName: String!) {
+	Dog(name: $dogName, owner: { name: $ownerName }) {
+		name
+		breed
+		owner {
+			name
+		}
+	}
+}
+```
+
+### Long Form Querying
+
+> Coming soon!
+
+### Mutations
+
+> Coming soon!
+
+### Subscriptions
+
+> Coming soon!
+
+### Directives
+
+> Coming soon!
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/headers.md b/site/versioned_docs/version-4.6/technical-details/reference/headers.md
new file mode 100644
index 00000000..5c85fc88
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/headers.md
@@ -0,0 +1,12 @@
+---
+title: Harper Headers
+---
+
+# Harper Headers
+
+All Harper API responses include headers that are important for interoperability and debugging purposes. The following headers are returned with all Harper API responses:
+
+| Key           | Example Value    | Description                                                                                                                                                |
+| ------------- | ---------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| server-timing | db;dur=7.165     | This reports the duration of the operation, in milliseconds. This follows the standard for Server-Timing and can be consumed by network monitoring tools.   |
+| content-type  | application/json | This reports the MIME type of the returned content, which is negotiated based on the requested content type in the Accept header.                           |
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/index.md b/site/versioned_docs/version-4.6/technical-details/reference/index.md
new file mode 100644
index 00000000..f00182d3
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/index.md
@@ -0,0 +1,29 @@
+---
+title: Reference
+---
+
+# Reference
+
+This section contains technical details and reference materials for Harper.
+
+- [Analytics](./analytics)
+- [Architecture](./architecture)
+- [Blob](./blob)
+- [Content Types](./content-types)
+- [Components](./components/)
+  - [Applications](./components/applications)
+  - [Built-In Extensions](./components/built-in-extensions)
+  - [Configuration](./components/configuration)
+  - [Extensions](./components/extensions)
+  - [(Experimental) Plugins](./components/plugins)
+- [Data Types](./data-types)
+- [Dynamic Schema](./dynamic-schema)
+- [Globals](./globals)
+- [GraphQL](./graphql)
+- [Headers](./headers)
+- [Limits](./limits)
+- [Resources](./resources/)
+  - [Migration](./resources/migration)
+  - [Instance Binding](./resources/instance-binding)
+- [Storage Algorithm](./storage-algorithm)
+- [Transactions](./transactions)
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/limits.md b/site/versioned_docs/version-4.6/technical-details/reference/limits.md
new file mode 100644
index 00000000..97214620
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/limits.md
@@ -0,0 +1,37 @@
+---
+title: Harper Limits
+---
+
+# Harper Limits
+
+This document outlines limitations of Harper.
+
+## Database Naming Restrictions
+
+**Case Sensitivity**
+
+Harper database metadata (database names, table names, and attribute/column names) is case sensitive, meaning databases, tables, and attributes can differ only by the case of their characters.
+
+**Restrictions on Database Metadata Names**
+
+Harper database metadata (database names, table names, and attribute names) cannot contain the following UTF-8 characters:
+
+```
+/`¡¢£¤¥¦§¨©ª«¬®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõö÷øùúûüýþÿ
+```
+
+Additionally, they cannot contain the first 31 non-printing characters. Spaces are allowed, but not recommended as best practice. The regular expression used to verify a name is valid is:
+
+```
+^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$
+```
+
+## Table Limitations
+
+**Attribute Maximum**
+
+Harper limits the number of total indexed attributes across tables (including the primary key of each table) to 10,000 per database.
+
+## Primary Keys
+
+The maximum length of a primary key is 1978 bytes or 659 characters (whichever is shorter).
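+
+For instance, a candidate name could be checked against the validation pattern above in JavaScript (a quick sketch):
+
+```javascript
+const VALID_NAME = /^[\x20-\x2E|\x30-\x5F|\x61-\x7E]*$/;
+
+VALID_NAME.test('dog_table'); // true
+VALID_NAME.test('my/table'); // false; '/' (0x2F) is excluded from the allowed ranges
+```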
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/resources/index.md b/site/versioned_docs/version-4.6/technical-details/reference/resources/index.md
new file mode 100644
index 00000000..8c42cd54
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/resources/index.md
@@ -0,0 +1,744 @@
---
title: Resource Class
---

# Resource Class

## Resource Class

The Resource class is designed to provide a unified API for modeling different data resources within Harper. Database/table data can be accessed through the Resource API. The Resource class can be extended to create new data sources. Resources can be exported to define endpoints. Tables themselves extend the Resource class, and can be extended by users.

Conceptually, a Resource class provides an interface for accessing, querying, modifying, and monitoring a set of entities or records. Instances of a Resource class can represent a single record or entity, or a collection of records, at a given point in time, that you can interact with through various methods or queries. Resource instances can represent an atomic transactional view of a resource and facilitate transactional interaction. A Resource instance holds the primary key/identifier, context information, and any pending updates to the record, so any instance methods can act on the record and have full access to this information during execution. Therefore, there are distinct resource instances created for every record or query that is accessed, and the instance methods are used for interaction with the data.

Resource classes also have static methods, which are generally the preferred way to externally interact with tables and resources. The static methods handle parsing paths and query strings, starting a transaction as necessary, performing access authorization checks (if required), creating a resource instance, and calling the instance methods. The general rule for how to interact with resources is:

- If you want to _act upon_ a table or resource, querying or writing to it, then use the static methods to initially access or write data. For example, you could use `MyTable.get(34)` to access the record with a primary key of `34`.
- If you want to _define custom behavior_ for a table or resource (to control how a resource responds to queries/writes), then extend the class and override/define instance methods.

The Resource API is heavily influenced by the REST/HTTP API, and the methods and properties of the Resource class are designed to map to and be used in a similar way to how you would interact with a RESTful API.

The REST-based API is a little different from traditional Create-Read-Update-Delete (CRUD) APIs that were designed with single-server interactions in mind. Semantics that attempt to guarantee no-existing-record or overwrite-only behavior require locks that don't scale well in a distributed database. Centralizing writes around `put` calls provides much more scalable, simple, and consistent behavior in a distributed, eventually consistent database.
You can generally think of CRUD operations mapping to REST operations like this:

- Read - `get`
- Create with a known primary key - `put`
- Create with a generated primary key - `post`/`create`
- Update (Full) - `put`
- Update (Partial) - `patch`
- Delete - `delete`

The RESTful HTTP server and other server interfaces will directly call resource methods of the same name to fulfill incoming requests, so resources can be defined as endpoints for external interaction. When resources are used by the server interfaces, the static method will be executed (which starts a transaction and does access checks), which will then create the resource instance and call the corresponding instance method. Paths (URL, MQTT topics) are mapped to different resource instances. Using a path that specifies an ID like `/MyResource/3492` will be mapped to an instance of `MyResource`, and will call the instance methods like `get(target)`, `put(target, data)`, and `post(target, data)`, where `target` is based on the `/3492` part of the path.

It is recommended that you use the latest version (V2) of the Resource API with the legacy instance binding behavior disabled. This is done by setting the static `loadAsInstance` property to `false` on the Resource class. This will become the default behavior in Harper version 5.0. This page is written assuming `loadAsInstance` is set to `false`. If you want to use the legacy instance binding behavior, you can set `loadAsInstance` to `true` on the Resource class. If you have existing code that you want to migrate, please see the [migration guide](./migration) for more information.

You can create classes that extend `Resource` to define your own data sources, typically to interface with external data sources (the `Resource` base class is available as a global variable in the Harper JS environment). In doing this, you will generally be extending and providing implementations for the instance methods below. For example:

```javascript
export class MyExternalData extends Resource {
	static loadAsInstance = false; // enable the updated API
	async get(target) {
		// fetch data from an external source, using our id
		let response = await this.fetch(target.id);
		// do something with the response
	}
	put(target, data) {
		// send the data into the external source
	}
	delete(target) {
		// delete an entity in the external data source
	}
	subscribe(subscription) {
		// if the external data source is capable of real-time notification of changes, can subscribe
	}
}
// we can export this class from resources.js as our own endpoint, or use this as the source for
// a Harper table to store and cache the data coming from this data source:
tables.MyCache.sourcedFrom(MyExternalData);
```

You can also extend table classes in the same way, overriding the instance methods for custom functionality.
The `tables` object is a global variable in the Harper JavaScript environment, along with `Resource`:

```javascript
export class MyTable extends tables.MyTable {
	static loadAsInstance = false; // enable the updated API
	get(target) {
		// we can add properties or change properties before returning data:
		return { ...super.get(target), newProperty: 'newValue', existingProperty: 42 }; // returns the record, with additional properties
	}
	put(target, data) {
		// can change data any way we want
		super.put(target, data);
	}
	delete(target) {
		super.delete(target);
	}
	post(target, data) {
		// providing a post handler (for HTTP POST requests) is a common way to create additional
		// actions that aren't well described with just PUT or DELETE
	}
}
```

Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice.

All Resource methods that are called from HTTP methods may directly return data or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code), to explicitly return specific headers and a status code.

## Global Variables

### `tables`

This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API.

### `databases`

This is an object with all the databases that have been defined in Harper (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.

### `Resource`

This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables.

### `server`

This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../components/extensions).

### `transaction`

This provides a function for starting transactions. See the transactions section below for more information.

### `contentTypes`

This provides an interface for defining new content type handlers. See the content type extensions documentation for more information.

### TypeScript Support

While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them.
This can be done by setting up a package link to the main Harper package in your app:

```
# you may need to go to your harper directory and set it up as a link first
npm link harperdb
```

And then you can import any of the main Harper APIs you will use, and your IDE should understand the full typings associated with them:

```
import { databases, tables, Resource } from 'harperdb';
```

## Resource Class (Instance) Methods

### Properties/attributes declared in schema

Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances.

### `get(target: RequestTarget | Id): Promise|AsyncIterable`

This retrieves a record, or queries for records, and is called by HTTP GET requests. This can be called with a `RequestTarget`, which can specify a path/id and query parameters as well as search parameters. For tables, this can also be called directly with an id (string or number) to retrieve a record by id. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. HTTP requests will always call `get` with a full `RequestTarget`. The default `get` method (`super.get(target)`) returns the current record as a plain object.

The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:

```javascript
class extends Resource {
	static loadAsInstance = false;
	get(target) {
		let param1 = target.get('param1'); // returns 'value'
		let id = target.id; // returns 'some-id'
		let path = target.pathname; // returns '/some-id'
		let fullTarget = target.target; // returns '/some-id?param1=value'
		...
	}
}
```

If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return the record identified by the path. If `get` is called on a collection (`/Table/?name=value`), the target will have the `isCollection` property set to `true` and the default action is to `search` and return an `AsyncIterable` of results.

### `search(query: RequestTarget): AsyncIterable`

This performs a query on this resource or table. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`) it searches through all records in the table. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an `AsyncIterable` of results. The `query` object can be used to specify the desired query.

### `put(target: RequestTarget | Id, data: object): void|Response`

This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(target, data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance.

The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.
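As a minimal sketch of overriding `put` to validate incoming data before writing (the `Product` table and `price` attribute here are assumptions for illustration):

```javascript
export class Product extends tables.Product {
	static loadAsInstance = false;
	async put(target, data) {
		// reject invalid data before it reaches the table
		if (typeof data.price !== 'number' || data.price < 0) {
			let error = new Error('price must be a non-negative number');
			error.statusCode = 400; // returned as the HTTP status code (see "Throwing Errors" below)
			throw error;
		}
		return super.put(target, data); // write the validated record in the current transaction
	}
}
```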
### `patch(target: RequestTarget | Id, data: object): void|Response`

This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(target, data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.

### `update(target: RequestTarget, updates?: object): Updatable`

This can be called to get an `Updatable` instance for updating a record. An `Updatable` instance provides direct access to record properties as properties on the `Updatable` instance. The properties can also be modified, and any changes are tracked and written to the record when the transaction commits. For example, if we wanted to update the quantity of a product in the Product table in response to a post, we could write:

```javascript
class ... {
	static loadAsInstance = false;
	post(target, data) {
		let updatable = this.update(target);
		updatable.quantity = updatable.quantity - 1;
	}
}
```

In addition, the `Updatable` class has the following methods.

### `Updatable` class

#### `addTo(property, value)`

This adds the provided value to the specified property using conflict-free replicated data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values. We could improve the example above to reliably ensure the quantity is decremented even when decrements occur on multiple nodes simultaneously:

```javascript
class ... {
	static loadAsInstance = false;
	post(target, data) {
		let updatable = this.update(target);
		updatable.addTo('quantity', -1);
	}
}
```

#### `subtractFrom(property, value)`

This functions exactly the same as `addTo`, except it subtracts the value.

The `Updatable` class also inherits the `getUpdatedTime` and `getExpiresAt` methods from the `RecordObject` class.

### `delete(target: RequestTarget): void|Response`

This will delete the record or resource identified by the target, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete(target)`) deletes the record identified by the target from the table as part of the current transaction. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.

### `publish(target: RequestTarget, message): void|Response`

This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(target, message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic.
The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.

### `post(target: RequestTarget, data: object): void|Response`

This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally, `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `target` object represents the target of a request and can be used to access the path, coerced id, and any query parameters that were included in the URL.

### `invalidate(target: RequestTarget)`

This method is available on tables. This will invalidate the specified record in the table. This can be used with a caching table and is used to indicate that the source data has changed, and the record needs to be reloaded when next accessed.

### `subscribe(subscriptionRequest: SubscriptionRequest): Promise`

This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource.

The returned promise resolves to a Subscription object, an `AsyncIterable` that you can iterate through with a `for await` loop. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time, a previous count, or there is a message for the current or "retained" record, these may be immediately returned).

The `SubscriptionRequest` object supports the following properties (all optional):

- `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id:'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`).
- `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
- `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` cannot be used in conjunction with `startTime`.
- `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.

### `connect(target: RequestTarget, incomingMessages?: AsyncIterable): AsyncIterable`

This is called when a connection is received through WebSockets or Server-Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called without `incomingMessages` when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client.
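To illustrate the subscription API described above, here is a minimal sketch of consuming a subscription from application code (the `Dog` table and the `'events'` id are assumptions for illustration):

```javascript
const { Dog } = tables;

// subscribe to updates for the 'events' id and everything beneath it,
// without immediately receiving the current/retained record
let subscription = await Dog.subscribe({ id: 'events', includeDescendants: true, omitCurrent: true });
for await (let message of subscription) {
	// each iteration yields an update or published message for the subscribed records
	console.log('received', message);
}
```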
### `getUpdatedTime(): number`

This returns the last updated time of the resource (the timestamp of the last commit). This is returned as milliseconds from epoch.

### `wasLoadedFromSource(): boolean`

Indicates if the record was loaded from the source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or had to wait on an in-flight request from the source to finish).

### `getContext(): Context`

Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action.

#### `Context`

The `Context` object has the following (potential) properties:

- `user` - This is the user object, which includes information about the username, role, and authorizations.
- `transaction` - The current transaction.

If the current method was triggered by an HTTP request, the following property is available:

- `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated.

When a resource gets a request through HTTP, the request object is the context, which has the following properties:

- `url` - The local path/URL of the request (this will not include the protocol or host name, but starts at the path and includes the query string).
- `method` - The method of the HTTP request.
- `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
- `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
- `pathname` - This provides the path part of the URL (no query string).
- `host` - This provides the host name of the request (from the `Host` header).
- `ip` - This provides the IP address of the client that made the request.
- `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body.
- `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body, like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to this for requests that do not traditionally have a request body, like `DELETE`.)

When a resource is accessed as a data source:

- `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and this first request context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.
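As a sketch of how the context can be used inside a resource method (assuming `loadAsInstance = false` and that the request arrived over HTTP, so the HTTP context properties above are present):

```javascript
export class MyTable extends tables.MyTable {
	static loadAsInstance = false;
	async get(target) {
		let context = this.getContext();
		let user = context.user; // the authenticated user, if any
		let userAgent = context.headers?.get('user-agent'); // read an incoming request header
		context.responseHeaders?.set('x-served-by', 'MyTable'); // set a response header
		return super.get(target);
	}
}
```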
### `operation(operationObject: Object, authorize?: boolean): Promise`

This method is available on tables and will execute a Harper operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../../developers/operations-api/) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized).

### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean`

For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value.

## Resource Static Methods and Properties

The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way to interact with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on.

The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a `static search()` method for specifically handling searching a table with query parameters. The Resource static methods create an instance bound to the record specified by the arguments and call the instance methods. Again, static methods are generally the preferred way to interact with resources and to call them from application code. These methods are available on all user Resource classes and tables.

### `get(target: RequestTarget|Id, context?: Resource|Context)`

This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post, you could do:

```javascript
const { MyTable, Comment } = tables;
...
// in class:
	async get() {
		for (let commentId of this.commentIds) {
			let comment = await Comment.get(commentId, this);
			// now you can do something with the comment record
		}
	}
```

Type definition for `Id`:

```typescript
type Id = string | number | Array<string | number>;
```

### `get(query: Query, context?: Resource|Context)`

This can be used to retrieve a resource instance by a query. The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`:

```javascript
MyTable.get({ id: 34, select: ['name', 'age'] });
```

This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above.
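For example, a sketch of a collection query through the static `get` (the `Dog` table and `breed` attribute are assumptions for illustration); because the query is not for a specific record id, it delegates to `search` and returns an `AsyncIterable`:

```javascript
const { Dog } = tables;

let huskies = await Dog.get({
	conditions: [{ attribute: 'breed', value: 'Husky' }],
	select: ['name', 'breed'],
});
for await (let dog of huskies) {
	// iterate the results; they are not accessible by index
}
```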
### `put(target: RequestTarget|Id, record: object, context?: Resource|Context): Promise`

This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same `id` (primary key).

### `put(record: object, context?: Resource|Context): Promise`

This will save the provided record or data to this resource. This will create a new record or fully replace an existing record if one exists with the same primary key provided in the record. If your table doesn't have a primary key attribute, you will need to use the method with the `id` argument. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.

### `create(record: object, context?: Resource|Context): Promise`

This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not include a primary key, and the generated primary key will be used. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.

### `post(target: RequestTarget|Id, data: object, context?: Resource|Context): Promise|any`

This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged.

### `patch(target: RequestTarget|Id, recordUpdate: object, context?: Resource|Context): Promise|void`

This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.

### `delete(target: RequestTarget|Id, context?: Resource|Context): Promise|void`

Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.

### `publish(target: RequestTarget|Id, message: object, context?: Resource|Context): Promise|void`

Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.

### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise`

Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this.

### `search(query: RequestTarget, context?: Resource|Context): AsyncIterable`

This will perform a query on this table or collection. The `query` parameter can be used to specify the desired query.

### `setComputedAttribute(name: string, computeFunction: (record: object) => any)`

This will define the function to use for a computed attribute. To use this, the attribute must be defined in the schema as a computed attribute. The `computeFunction` will be called with the record as an argument and should return the computed value for the attribute.
For example:

```javascript
MyTable.setComputedAttribute('computedAttribute', (record) => {
	return record.attribute1 + record.attribute2;
});
```

For a schema like:

```graphql
type MyTable @table {
	id: ID @primaryKey
	attribute1: Int
	attribute2: Int
	computedAttribute: Int @computed
}
```

See the [schema documentation](../../../developers/applications/defining-schemas) for more information on computed attributes.

### `primaryKey`

This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example:

```javascript
let record34 = await Table.get(34);
record34[Table.primaryKey]; // -> 34
```

There are additional methods that are only available on table classes (which are a type of resource).

### `Table.sourcedFrom(Resource, options)`

This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`), and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter can configure the table with a time-to-live expiration window for automatic deletion or invalidation of older entries, and supports:

- `expiration` - Default expiration time for records in seconds.
- `eviction` - Eviction time for records in seconds.
- `scanInterval` - Time period for scanning the table for records to evict.

If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data).

### `directURLMapping`

This property can be set to force the direct URL request target to be mapped to the resource primary key. Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example:

```javascript
export class MyTable extends tables.MyTable {
	static directURLMapping = true;
}
```

```http
GET /MyTable/test?foo=bar
```

This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource.

### `getRecordCount({ exactCount: boolean })`

This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. If this was not an exact count, it will also include an `estimatedRange` array with the estimated range of the count.

### `parsePath(path, context, query)`

This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id.
By default, this will parse `.property` suffixes for accessing properties and specifying a preferred content type in the URL (and for older tables it will convert a multi-segment path to a multipart array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simple path-to-id preservation:

```javascript
	static parsePath(path) {
		return path; // return the path as the id
	}
```

### `isCollection(resource: Resource): boolean`

This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity.

### Context and Transactions

Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation.

This also allows timestamps that are accessed during resolution to be used to determine the overall last updated timestamp, which informs the header timestamps (which facilitates accurate client-side caching). The context also maintains user, session, and request metadata information that is communicated so that contextual request information (like headers) can be accessed, any writes are properly attributed to the correct user, and any additional security checks can be applied to the user.

When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the source argument (second argument) to the static methods.

For example, suppose we had a method to post a comment on a blog, and when this happens we want to add the comment to a separate comment table and also update an array of comment IDs on the blog record. We might do this:

```javascript
const { Comment } = tables;

export class BlogPost extends tables.BlogPost {
	post(comment) {
		// add a comment record to the comment table, using this resource as the source for the context
		Comment.put(comment, this);
		this.comments.push(comment.id); // add the id for the record to our array of comment ids
		// Both of these actions will be committed atomically as part of the same transaction
	}
}
```

Please see the [transaction documentation](../transactions) for more information on how transactions work in Harper.

### Query

The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional:

#### `conditions`

This is an array of objects that specify the conditions used to match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties:

- `attribute`: Name of the property/attribute to match on.
- `value`: The value to match.
- `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater_than", "greater_than_equal", "less_than", "less_than_equal", "starts_with", "contains", "ends_with", "between", and "not_equal".
- `conditions`: An array of conditions, which follows the same structure as above.
- `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`); this is optional and defaults to `and`.
For example, a complex query might look like: + +For example, a more complex query might look like: + +```javascript +Table.search({ + conditions: [ + { attribute: 'price', comparator: 'less_than', value: 100 }, + { + operator: 'or', + conditions: [ + { attribute: 'rating', comparator: 'greater_than', value: 4 }, + { attribute: 'featured', value: true }, + ], + }, + ], +}); +``` + +**Chained Attributes/Properties** + +Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../../developers/applications/defining-schemas), see the [REST documentation](../../../developers/rest) for more of overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name: + +```javascript +Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] }); +``` + +This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in a referenced joined tables. + +#### `operator` + +Specifies if the conditions should be applied as an `"and"` (records must match all conditions), or as an "or" (records must match at least one condition). This is optional and defaults to `"and"`. + +#### `limit` + +This specifies the limit of the number of records that should be returned from the query. + +#### `offset` + +This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records. + +#### `select` + +This specifies the specific properties that should be included in each record that is returned. This can be an array, to specify a set of properties that should be included in the returned objects. The array can specify an `select.asArray = true` property and the query results will return a set of arrays of values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and `select` array itself that specifies properties that should be returned by the referenced sub-object or related record. 
For example, a `select` can be defined:

```javascript
Table.search({ select: ['name', 'age'], conditions: ...})
```

Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects:

```javascript
Table.search({ select: ['name', { name: 'related', select: ['description', 'id'] }], conditions: ...})
```

The select properties can also include certain special properties:

- `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
- `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).

Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects:

```javascript
Table.search({ select: 'id', conditions: ...})
```

#### `sort`

This defines the sort order, and should be an object that can have the following properties:

- `attribute`: The attribute to sort on.
- `descending`: If true, will sort in descending order (optional and defaults to `false`).
- `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.

#### `explain`

This will return the conditions re-ordered as Harper will execute them. Harper will estimate the number of matching records for each condition and apply the narrowest condition first.

#### `enforceExecutionOrder`

This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them.

The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index).

For example, we could do a query like:

```javascript
let { Product } = tables;
let results = Product.search({
	conditions: [
		{ attribute: 'rating', value: 4.5, comparator: 'greater_than' },
		{ attribute: 'price', value: 100, comparator: 'less_than' },
	],
	offset: 20,
	limit: 10,
	select: ['id', 'name', 'price', 'rating'],
	sort: { attribute: 'price' },
});
for await (let record of results) {
	// iterate through each record in the query results
}
```

`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will leave a long-lived read transaction open, which can degrade performance (including write performance), and the transaction may eventually be aborted.

### `RequestTarget`

The `RequestTarget` class is used to represent a URL path that can be mapped to a resource. This is used by the REST interface to map a URL path to a resource class. All REST methods are called with a `RequestTarget` as the first argument, which is used to determine which record or entry to access or modify.
Methods on a `Resource` class can be called with a primary key as a string or number value as the first argument, to access or modify a record by primary key, which will work with all the default methods. The static methods will transform the primary key into a `RequestTarget` instance when calling the instance methods, for argument normalization. When a `RequestTarget` is constructed from a URL path (from the REST methods), the static methods will also automatically parse the path, including parsing the search string into query parameters.

Below are the properties and methods of the `RequestTarget` class:

- `pathname` - The path of the URL relative to the resource path that matched this request. This excludes the query/search string.
- `toString()` - The full relative path and search string of the URL.
- `search` - The search/query part of the target path (the part after the first `?` character).
- `id` - The primary key of the resource, as determined by the path.
- `checkPermission` - This property is set to an object indicating that a permission check should be performed on the resource. This is used by the REST interface to determine if a user has permission to access the resource. The object contains:
  - `action` - The type of action being performed (read/write/delete)
  - `resource` - The resource being accessed
  - `user` - The user requesting access

`RequestTarget` is a subclass of `URLSearchParams`, and these methods are available for accessing and modifying the query parameters:

- `get(name: string)` - Get the value of the query parameter with the specified name
- `getAll(name: string)` - Get all the values of the query parameter with the specified name
- `set(name: string, value: string)` - Set the value of the query parameter with the specified name
- `append(name: string, value: string)` - Append the value to the query parameter with the specified name
- `delete(name: string)` - Delete the query parameter with the specified name
- `has(name: string)` - Check if the query parameter with the specified name exists

In addition, the `RequestTarget` class is an iterable, so you can iterate through the query parameters:

- `for (let [name, value] of target)` - Iterate through the query parameters

When a `RequestTarget` has query parameters using Harper's extended query syntax, the REST static methods will parse the `RequestTarget` and potentially add any of the following properties if they are present in the query:

- `conditions` - An array of conditions that will be used to filter the query results
- `limit` - The limit of the number of records to return
- `offset` - The number of records to skip before returning the results
- `sort` - The sort order of the query results
- `select` - The properties to return in the query results

### `RecordObject`

The `get` method will return a `RecordObject` instance, which is an object containing all the properties of the record. Any property on the record can be directly accessed, and the properties can be enumerated with standard JS capabilities like `for`-`in` and `Object.keys`. The `RecordObject` instance will also have the following methods:

- `getUpdatedTime()` - Get the last updated time (the version number) of the record
- `getExpiresAt()` - Get the expiration time of the entry, if there is one.
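As a short sketch of working with a returned `RecordObject` (the `Product` table is an assumption for illustration):

```javascript
let product = await Product.get(1);
// properties enumerate like a plain object
for (let key in product) {
	console.log(key, product[key]);
}
let version = product.getUpdatedTime(); // last updated timestamp/version of the record
let expiresAt = product.getExpiresAt(); // expiration time, if any (for caching tables)
```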
### Interacting with the Resource Data Model

When extending or interacting with table resources, you can interact through standard CRUD/REST methods to create, read, update, and delete records. You can use idiomatic property access and modification to interact with the records themselves. For example, let's say we defined a product schema:

```graphql
type Product @table {
	id: ID @primaryKey
	name: String
	rating: Int
	price: Float
}
```

If we have extended this table class with our own `get()`, we can interact with the record:

```javascript
export class CustomProduct extends Product {
	async get(target) {
		let record = await super.get(target);
		let name = record.name; // this is the name of the current product
		let rating = record.rating; // this is the rating of the current product
		// we can't directly modify the record (it is frozen), but we can copy it if we want to return a modification
		record = { ...record, rating: 3 };
		return record;
	}
}
```

Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:

```javascript
let product1 = await Product.get(1);
let name = product1.name; // this is the name of the product with a primary key of 1
let rating = product1.rating; // this is the rating of the product with a primary key of 1
// if we want to update a single property:
await Product.patch(1, { rating: 3 });
```

When running inside a transaction, we can use the `update` method, and updates are automatically saved when a request completes:

```javascript
export class CustomProduct extends Product {
	post(target, data) {
		let record = this.update(target);
		record.name = data.name;
		record.description = data.description;
		// both of these changes will be saved automatically as this transaction commits
	}
}
```

We can also interact with properties in nested objects and arrays, following the same patterns. For example, we could define more complex types on our product:

```graphql
type Product @table {
	id: ID @primaryKey
	name: String
	rating: Int
	price: Float
	brand: Brand
	variations: [Variation]
}
type Brand {
	name: String
}
type Variation {
	name: String
	price: Float
}
```

We can interact with these nested properties:

```javascript
export class CustomProduct extends Product {
	post(target, data) {
		let record = this.update(target);
		let brandName = record.brand.name;
		let firstVariationPrice = record.variations[0].price;
		let additionalInfoOnBrand = record.brand.additionalInfo; // not defined in schema, but we can still try to access the property
		// make some changes
		record.variations.splice(0, 1); // remove the first variation
		record.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
		record.brand.name = 'new brand name';
		// all these changes will be saved
	}
}
```

If you need to delete a property, you can do so with the `delete` method:

```javascript
let product1 = await Product.update(1);
product1.delete('additionalInformation');
```

## Response Object

The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response`-style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes.
For example, you could return a redirect response like:

```javascript
return { status: 302, headers: { Location: '/new-location' } };
```

If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation). For example, we could return an object with a custom header:

```javascript
return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
```

### Throwing Errors

You may throw errors (and leave them uncaught) from the response methods, and these will be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default, the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned. For example:

```javascript
if (notAuthorized()) {
	let error = new Error('You are not authorized to access this');
	error.statusCode = 403;
	throw error;
}
```

diff --git a/site/versioned_docs/version-4.6/technical-details/reference/resources/instance-binding.md b/site/versioned_docs/version-4.6/technical-details/reference/resources/instance-binding.md
new file mode 100644
index 00000000..8c2629d1
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/resources/instance-binding.md
@@ -0,0 +1,723 @@
---
title: Resource Class with Resource Instance Binding behavior
---

# Resource Class with Resource Instance Binding behavior

This document describes the legacy instance binding behavior of the Resource class. It is recommended that you use the [updated behavior of the Resource API](./) instead, but this legacy API is preserved for backwards compatibility.

## Resource Class

```javascript
export class MyExternalData extends Resource {
	static loadAsInstance = true;
	async get() {
		// fetch data from an external source, using our id
		let response = await this.fetch(this.id);
		// do something with the response
	}
	put(data) {
		// send the data into the external source
	}
	delete() {
		// delete an entity in the external data source
	}
	subscribe(options) {
		// if the external data source is capable of real-time notification of changes, can subscribe
	}
}
// we can export this class from resources.js as our own endpoint, or use this as the source for
// a Harper table to store and cache the data coming from this data source:
tables.MyCache.sourcedFrom(MyExternalData);
```

You can also extend table classes in the same way, overriding the instance methods for custom functionality.
The `tables` object is a global variable in the Harper JavaScript environment, along with `Resource`:

```javascript
export class MyTable extends tables.MyTable {
	get() {
		// we can add properties or change properties before returning data:
		this.newProperty = 'newValue';
		this.existingProperty = 44;
		return super.get(); // returns the record, modified with the changes above
	}
	put(data) {
		// can change data any way we want
		super.put(data);
	}
	delete() {
		super.delete();
	}
	post(data) {
		// providing a post handler (for HTTP POST requests) is a common way to create additional
		// actions that aren't well described with just PUT or DELETE
	}
}
```

Make sure that if you are extending and `export`ing your table with this class, you remove the `@export` directive in your schema, so that you aren't exporting the same table/class name twice.

All Resource methods that are called from HTTP methods may directly return data or may return a [`Response`](https://developer.mozilla.org/en-US/docs/Web/API/Response) object or an object with `headers` and a `status` (HTTP status code), to explicitly return specific headers and a status code.

## Global Variables

### `tables`

This is an object with all the tables in the default database (the default database is "data"). Each table that has been declared or created will be available as a (standard) property on this object, and the value will be the table class that can be used to interact with that table. The table classes implement the Resource API.

### `databases`

This is an object with all the databases that have been defined in Harper (in the running instance). Each database that has been declared or created will be available as a (standard) property on this object. The property values are an object with the tables in that database, where each property is a table, like the `tables` object. In fact, `databases.data === tables` should always be true.

### `Resource`

This is the Resource base class. This can be directly extended for custom resources, and is the base class for all tables.

### `server`

This object provides extension points for extension components that wish to implement new server functionality (new protocols, authentication, etc.). See the [extensions documentation for more information](../components/extensions).

### `transaction`

This provides a function for starting transactions. See the transactions section below for more information.

### `contentTypes`

This provides an interface for defining new content type handlers. See the content type extensions documentation for more information.

### TypeScript Support

While these objects/methods are all available as global variables, it is easier to get TypeScript support (code assistance, type checking) for these interfaces by explicitly `import`ing them. This can be done by setting up a package link to the main Harper package in your app:

```
# you may need to go to your harper directory and set it up as a link first
npm link harperdb
```

And then you can import any of the main Harper APIs you will use, and your IDE should understand the full typings associated with them:

```
import { databases, tables, Resource } from 'harperdb';
```

## Resource Class (Instance) Methods

### Properties/attributes declared in schema

Properties that have been defined in your table's schema can be accessed and modified as direct properties on the Resource instances.
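For example, a minimal sketch of reading and modifying a schema-declared `name` attribute in this legacy instance-bound style (the attribute name is an assumption for illustration):

```javascript
export class MyTable extends tables.MyTable {
	get() {
		// schema-declared attributes are bound directly to the instance
		let currentName = this.name;
		this.name = currentName ? currentName.trim() : currentName; // changes are reflected in the returned record
		return super.get();
	}
}
```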
### `get(queryOrProperty?): Resource|AsyncIterable`

This is called to return the record or data for this resource, and is called by HTTP GET requests. This may optionally be called with a `query` object to specify that a query should be performed, or with a string to indicate that the specified property value should be returned. When defining Resource classes, you can define or override this method to define exactly what should be returned when retrieving a record. The default `get` method (`super.get()`) returns the current record as a plain object.

The query object can be used to access any query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:

```javascript
get(query) {
	// note that query will only exist (as an object) if there is a query string
	let param1 = query?.get?.('param1'); // returns 'value'
	let id = this.getId(); // returns 'some-id'
	...
}
```

If `get` is called for a single record (for a request like `/Table/some-id`), the default action is to return `this` instance of the resource. If `get` is called on a collection (`/Table/?name=value`), the default action is to `search` and return an `AsyncIterable` of results.

It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.get(query)` performs a `get` on this specific record/resource, not on the whole table. If you wish to access a _different_ record, you should use the static `get` method on the table class, like `Table.get(otherId, context)`.

### `search(query: Query): AsyncIterable`

This performs a query on this resource, searching for records that are descendants. By default, this is called by `get(query)` from a collection resource. When this is called for the root resource (like `/Table/`) it searches through all records in the table. However, if you call search from an instance with a specific ID like `1` from a path like `Table/1`, it will only return records that are descendants of that record, like `[1, 1]` (path of `Table/1/1`) and `[1, 2]` (path of `Table/1/2`). If you want to do a standard search of the table, make sure you call the static method like `Table.search(...)`. You can define or override this method to define how records should be queried. The default `search` method on tables (`super.search(query)`) will perform a query and return an `AsyncIterable` of results. The query object can be used to specify the desired query.

### `getId(): string|number|Array`

Returns the primary key value for this resource.

### `put(data: object, query?: Query): Resource|void|Response`

This will assign the provided record or data to this resource, and is called for HTTP PUT requests. You can define or override this method to define how records should be updated. The default `put` method on tables (`super.put(data)`) writes the record to the table (updating or inserting depending on whether the record previously existed) as part of the current transaction for the resource instance.

It is important to note that `this` is the resource instance for a specific record, specified by the primary key. Therefore, calling `super.put(data)` updates this specific record/resource, not other records in the table. If you wish to update a _different_ record, you should use the static `put` method on the table class, like `Table.put(data, context)`.
+
+The `query` argument is used to represent any additional query parameters that were included in the URL. For example, with a request to `/my-resource/some-id?param1=value`, we can access URL/request information:
+
+```javascript
+put(data, query) {
+	let param1 = query?.get?.('param1'); // returns 'value'
+	...
+}
+```
+
+### `patch(data: object): Resource|void|Response`
+
+### `patch(data: object, query?: Query)`
+
+This will update the existing record with the provided data's properties, and is called for HTTP PATCH requests. You can define or override this method to define how records should be updated. The default `patch` method on tables (`super.patch(data)`) updates the record. The properties will be applied to the existing record, overwriting the existing record's properties, and preserving any properties in the record that are not specified in the `data` object. This is performed as part of the current transaction for the resource instance. The `query` argument is used to represent any additional query parameters that were included.
+
+### `update(data: object, fullUpdate: boolean?)`
+
+This is called by the default `put` and `patch` handlers to update a record. `put` calls it with `fullUpdate` as `true` to indicate a full record replacement (`patch` calls it with the second argument as `false`). Any additional property changes that are made before the transaction commits will also be persisted.
+
+### `delete(queryOrProperty?): Resource|void|Response`
+
+This will delete this record or resource, and is called for HTTP DELETE requests. You can define or override this method to define how records should be deleted. The default `delete` method on tables (`super.delete()`) deletes the record from the table as part of the current transaction.
+
+### `publish(message): Resource|void|Response`
+
+This will publish a message to this resource, and is called for MQTT publish commands. You can define or override this method to define how messages should be published. The default `publish` method on tables (`super.publish(message)`) records the published message as part of the current transaction; this will not change the data in the record but will notify any subscribers to the record/topic.
+
+### `post(data: object, query?: Query): Resource|void|Response`
+
+This is called for HTTP POST requests. You can define this method to provide your own implementation of how POST requests should be handled. Generally `POST` provides a generic mechanism for various types of data updates, and is a good place to define custom functionality for updating records. The default behavior is to create a new record/resource. The `query` argument is used to represent any additional query parameters that were included.
+
+### `invalidate()`
+
+This method is available on tables. This will invalidate the current record in the table. This can be used with a caching table and is used to indicate that the source data has changed, and the record needs to be reloaded when next accessed.
+
+### `subscribe(subscriptionRequest: SubscriptionRequest): Promise`
+
+This will subscribe to the current resource, and is called for MQTT subscribe commands. You can define or override this method to define how subscriptions should be handled. The default `subscribe` method on tables (`super.subscribe(subscriptionRequest)`) will set up a listener that will be called for any changes or published messages to this resource.
+
+The returned Subscription object (which the promise resolves to) is an `AsyncIterable` that you can iterate through with a `for await` loop. It also has a `queue` property which holds (an array of) any messages that are ready to be delivered immediately (if you have specified a start time, a previous count, or there is a message for the current or "retained" record, these may be immediately returned).
+
+The `SubscriptionRequest` object supports the following properties (all optional):
+
+- `includeDescendants` - If this is enabled, this will create a subscription to all the record updates/messages that are prefixed with the id. For example, a subscription request of `{id: 'sub', includeDescendants: true}` would return events for any update with an id/topic of the form sub/\* (like `sub/1`).
+- `startTime` - This will begin the subscription at a past point in time, returning all updates/messages since the start time (a catch-up of historical messages). This can be used to resume a subscription, getting all messages since the last subscription.
+- `previousCount` - This specifies the number of previous updates/messages to deliver. For example, `previousCount: 10` would return the last ten messages. Note that `previousCount` can not be used in conjunction with `startTime`.
+- `omitCurrent` - Indicates that the current (or retained) record should _not_ be immediately sent as the first update in the subscription (if no `startTime` or `previousCount` was used). By default, the current record is sent as the first update.
+
+### `connect(incomingMessages?: AsyncIterable, query?: Query): AsyncIterable`
+
+This is called when a connection is received through WebSockets or Server-Sent Events (SSE) to this resource path. This is called with `incomingMessages` as an iterable stream of incoming messages when the connection is from WebSockets, and is called with no arguments when the connection is from an SSE connection. This can return an asynchronous iterable representing the stream of messages to be sent to the client.
+
+### `set(property, value)`
+
+This will assign the provided value to the designated property in the resource's record. During a write operation, this will indicate that the record has changed and the changes will be saved during commit. During a read operation, this will modify the copy of the record that will be serialized during serialization (converted to the output format of JSON, MessagePack, etc.).
+
+### `allowCreate(user: any, data: Promise, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to create the current resource. This is called as part of external incoming requests (HTTP). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's insert permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean, and may await the `data` promise to determine if the data is valid for creation.
+
+### `allowRead(user: any, query: Map | void, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to read from the current resource. This is called as part of external incoming requests (HTTP GET). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's read permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean.
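+
+As a sketch of how these authorization hooks can be overridden (the table name and the business rule are illustrative, and the exact shape of the `user` object may vary):
+
+```javascript
+export class PublicProduct extends tables.Product {
+	allowRead(user) {
+		return Boolean(user); // any authenticated user may read
+	}
+	async allowCreate(user, data, context) {
+		if (!user) return false;
+		let record = await data; // data is a promise to the deserialized request body
+		return record.price >= 0; // an illustrative validation rule
+	}
+}
+```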
+
+### `allowUpdate(user: any, data: Promise, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to update the current resource. This is called as part of external incoming requests (HTTP PUT). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's update permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean, and may await the `data` promise to determine if the data is valid for the update.
+
+### `allowDelete(user: any, query: Map | void, context: Context): boolean | Promise`
+
+This is called to determine if the user has permission to delete the current resource. This is called as part of external incoming requests (HTTP DELETE). The default behavior for a generic resource is that this requires super-user permission, and the default behavior for a table is to check the user's role's delete permission for the table. The allow method may be asynchronous and return a promise that resolves to a boolean.
+
+### `addTo(property, value)`
+
+This adds the provided value to the specified property using conflict-free replicated data type (CRDT) incrementation. This ensures that even if multiple calls are simultaneously made to increment a value, the resulting merge of data changes from different threads and nodes will properly sum all the added values.
+
+### `getUpdatedTime(): number`
+
+This returns the last updated time of the resource (the timestamp of the last commit). This is returned as milliseconds from epoch.
+
+### `wasLoadedFromSource(): boolean`
+
+Indicates if the record had been loaded from source. When using caching tables, this indicates that there was a cache miss and the data had to be loaded from the source (or had to wait on an in-flight request to the source to finish).
+
+### `getContext(): Context`
+
+Returns the context for this resource. The context contains information about the current transaction, the user that initiated this action, and other metadata that should be retained through the life of an action.
+
+#### `Context`
+
+The `Context` object has the following (potential) properties:
+
+- `user` - This is the user object, which includes information about the username, role, and authorizations.
+- `transaction` - The current transaction.
+- `lastModified` - This value is used to indicate the last modified or updated timestamp of any resource(s) that are accessed, and will inform the response's `ETag` (or `Last-Modified`) header. This can be updated by application code if it knows that a modification should cause this timestamp to be updated.
+
+When a resource gets a request through HTTP, the request object is the context, which has the following properties:
+
+- `url` - The local path/URL of the request (this will not include the protocol or host name, but will start at the path and includes the query string).
+- `method` - The method of the HTTP request.
+- `headers` - This is an object with the headers that were included in the HTTP request. You can access headers by calling `context.headers.get(headerName)`.
+- `responseHeaders` - This is an object with the headers that will be included in the HTTP response. You can set headers by calling `context.responseHeaders.set(headerName, value)`.
+- `pathname` - This provides the path part of the URL (no querystring).
+- `host` - This provides the host name of the request (from the `Host` header).
+- `ip` - This provides the IP address of the client that made the request.
+- `body` - This is the request body as a raw NodeJS Readable stream, if there is a request body.
+- `data` - If the HTTP request had a request body, this provides a promise to the deserialized data from the request body. (Note that for methods that normally have a request body, like `POST` and `PUT`, the resolved deserialized data is passed in as the main argument, but accessing the data from the context provides access to it for requests that do not traditionally have a request body, like `DELETE`.)
+
+When a resource is accessed as a data source:
+
+- `requestContext` - For resources that are acting as a data source for another resource, this provides access to the context of the resource that is making a request for data from the data source resource. Note that it is generally not recommended to rely on this context. The resolved data may be used to fulfill many different requests, and this first request's context may not be representative of future requests. Also, source resolution may be triggered by various actions, not just specified endpoints (for example queries, operations, studio, etc.), so make sure you are not relying on specific request context information.
+
+### `operation(operationObject: Object, authorize?: boolean): Promise`
+
+This method is available on tables and will execute a Harper operation, using the current table as the target of the operation (the `table` and `database` do not need to be specified). See the [operations API](../../../developers/operations-api/) for available operations that can be performed. You can set the second argument to `true` if you want the current user to be checked for authorization for the operation (if `true`, this will throw an error if they are not authorized).
+
+### `allowStaleWhileRevalidate(entry: { version: number, localTime: number, expiresAt: number, value: object }, id): boolean`
+
+For caching tables, this can be defined to allow stale entries to be returned while revalidation is taking place, rather than waiting for revalidation. The `version` is the timestamp/version from the source, the `localTime` is when the resource was last refreshed, the `expiresAt` is when the resource expired and became stale, and the `value` is the last value (the stale value) of the record/resource. All times are in milliseconds since epoch. Returning `true` will allow the current stale value to be returned while revalidation takes place concurrently. Returning `false` will cause the response to wait for the data source or origin to revalidate or provide the latest value first, and then return the latest value.
+
+## Resource Static Methods and Properties
+
+The Resource class also has static methods that mirror the instance methods, with an initial argument that is the id of the record to act on. The static methods are generally the preferred and most convenient way to interact with tables outside of methods that are directly extending a table. Whereas instance methods are bound to a specific record, the static methods allow you to specify any record in the table to act on.
+
+The `get`, `put`, `delete`, `publish`, `subscribe`, and `connect` methods all have static equivalents. There is also a static `search()` method for specifically handling searching a table with query parameters.
By default, the Resource static methods create an instance bound to the record specified by the arguments and call the corresponding instance methods. Again, static methods are generally the preferred way to interact with resources from application code. These methods are available on all user Resource classes and tables.
+
+### `get(id: Id, context?: Resource|Context)`
+
+This will retrieve a resource instance by id. For example, if you want to retrieve comments by id in the retrieval of a blog post you could do:
+
+```javascript
+const { MyTable, Comment } = tables;
+...
+// in class:
+	async get() {
+		for (let commentId of this.commentIds) {
+			let comment = await Comment.get(commentId, this);
+			// now you can do something with the comment record
+		}
+	}
+```
+
+Type definition for `Id`:
+
+```typescript
+type Id = string | number | Array<string | number>;
+```
+
+### `get(query: Query, context?: Resource|Context)`
+
+This can be used to retrieve a resource instance by a query. The query can be used to specify a single/unique record by an `id` property, and can be combined with a `select`:
+
+```javascript
+MyTable.get({ id: 34, select: ['name', 'age'] });
+```
+
+This method may also be used to retrieve a collection of records by a query. If the query is not for a specific record id, this will call the `search` method, described above.
+
+### `put(id: Id, record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record, or fully replace an existing record if one exists with the same `id` (primary key).
+
+### `put(record: object, context?: Resource|Context): Promise`
+
+This will save the provided record or data to this resource. This will create a new record, or fully replace an existing record if one exists with the same primary key provided in the record. If your table doesn't have a primary key attribute, you will need to use the method with the `id` argument. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `create(record: object, context?: Resource|Context): Promise`
+
+This will create a new record using the provided record for all fields (except the primary key), generating a new primary key for the record. This does _not_ check for an existing record; the record argument should not have a primary key and should use the generated primary key. This will (asynchronously) return the new resource instance. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `post(id: Id, data: object, context?: Resource|Context): Promise`
+
+### `post(data: object, context?: Resource|Context): Promise`
+
+This will save the provided data to this resource. By default, this will create a new record (by calling `create`). However, the `post` method is specifically intended to be available for custom behaviors, so extending a class to support custom `post` method behavior is encouraged.
+
+### `patch(recordUpdate: object, context?: Resource|Context): Promise`
+
+### `patch(id: Id, recordUpdate: object, context?: Resource|Context): Promise`
+
+This will save the provided updates to the record. The `recordUpdate` object's properties will be applied to the existing record, overwriting the existing record's properties, and preserving any properties in the record that are not specified in the `recordUpdate` object. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
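+
+As a sketch of how the static methods can be combined from application code (the `Product` table, its attributes, and the `Restock` resource are illustrative):
+
+```javascript
+const { Product } = tables;
+
+export class Restock extends Resource {
+	async post(data) {
+		// pass `this` as the context so these writes join the current transaction
+		let product = await Product.get(data.productId, this);
+		if (product) {
+			await Product.patch(data.productId, { inStock: true }, this);
+		} else {
+			await Product.create({ name: data.name, inStock: true }, this);
+		}
+	}
+}
+```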
+
+### `delete(id: Id, context?: Resource|Context): Promise`
+
+Deletes this resource's record or data. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `publish(message: object, context?: Resource|Context): Promise`
+
+### `publish(topic: Id, message: object, context?: Resource|Context): Promise`
+
+Publishes the given message to the record entry specified by the id in the context. Make sure to `await` this function to ensure it finishes execution within the surrounding transaction.
+
+### `subscribe(subscriptionRequest?, context?: Resource|Context): Promise`
+
+Subscribes to a record/resource. See the description of the `subscriptionRequest` object above for more information on how to use this.
+
+### `search(query: Query, context?: Resource|Context): AsyncIterable`
+
+This will perform a query on this table or collection. The query parameter can be used to specify the desired query.
+
+### `setComputedAttribute(name: string, computeFunction: (record: object) => any)`
+
+This will define the function to use for a computed attribute. To use this, the attribute must be defined in the schema as a computed attribute. The `computeFunction` will be called with the record as an argument and should return the computed value for the attribute. For example:
+
+```javascript
+MyTable.setComputedAttribute('computedAttribute', (record) => {
+	return record.attribute1 + record.attribute2;
+});
+```
+
+For a schema like:
+
+```graphql
+type MyTable @table {
+	id: ID @primaryKey
+	attribute1: Int
+	attribute2: Int
+	computedAttribute: Int @computed
+}
+```
+
+See the [schema documentation](../../../developers/applications/defining-schemas) for more information on computed attributes.
+
+### `primaryKey`
+
+This property indicates the name of the primary key attribute for a table. You can get the primary key for a record using this property name. For example:
+
+```javascript
+let record34 = await Table.get(34);
+record34[Table.primaryKey]; // -> 34
+```
+
+There are additional methods that are only available on table classes (which are a type of resource).
+
+### `Table.sourcedFrom(Resource, options)`
+
+This defines the source for a table. This allows a table to function as a cache for an external resource. When a table is configured to have a source, any request for a record that is not found in the table will be delegated to the source resource to retrieve (via `get`), and the result will be cached/stored in the table. All writes to the table will also first be delegated to the source (if the source defines write functions like `put`, `delete`, etc.). The `options` parameter supports:
+
+- `expiration` - Default expiration time for records in seconds, configuring the table with a time-to-live expiration window for automatic deletion or invalidation of older entries.
+- `eviction` - Eviction time for records in seconds.
+- `scanInterval` - Time period for scanning the table for records to evict.
+
+If the source resource implements subscription support, real-time invalidation can be performed to ensure the cache is guaranteed to be fresh (and this can eliminate or reduce the need for time-based expiration of data).
+
+### `directURLMapping`
+
+This property can be set to force the direct URL request target to be mapped to the resource primary key.
Normally, URL resource targets are parsed, where the path is mapped to the primary key of the resource (and decoded using standard URL decoding), and any query string parameters are used to query that resource. But if this is turned on, the full URL is used as the primary key. For example:
+
+```javascript
+export class MyTable extends tables.MyTable {
+	static directURLMapping = true;
+}
+```
+
+```http
+GET /MyTable/test?foo=bar
+```
+
+This will be mapped to the resource with a primary key of `test?foo=bar`, and no querying will be performed on that resource.
+
+### `getRecordCount({ exactCount: boolean })`
+
+This will return the number of records in the table. By default, this will return an approximate count of records, which is fast and efficient. If you want an exact count, you can pass `{ exactCount: true }` as the first argument, but this will be slower and more expensive. The return value will be a Promise that resolves to an object with a `recordCount` property, which is the number of records in the table. If this was not an exact count, it will also include an `estimatedRange` array with the estimated range of the count.
+
+### `parsePath(path, context, query)`
+
+This is called by static methods when they are responding to a URL (from an HTTP request, for example), and translates the path to an id. By default, this will parse `.property` suffixes for accessing properties and specifying the preferred content type in the URL (and for older tables it will convert a multi-segment path to a multipart array id). However, in some situations you may wish to preserve the path directly as a string. You can override `parsePath` for simpler path-to-id preservation:
+
+```javascript
+	static parsePath(path) {
+		return path; // return the path as the id
+	}
+```
+
+### `isCollection(resource: Resource): boolean`
+
+This returns a boolean indicating if the provided resource instance represents a collection (can return a query result) or a single record/entity.
+
+### Context and Transactions
+
+Whenever you implement an action that is calling other resources, it is recommended that you provide the "context" for the action. This allows a secondary resource to be accessed through the same transaction, preserving atomicity and isolation.
+
+This also allows timestamps that are accessed during resolution to be used to determine the overall last-updated timestamp, which informs the response header timestamps (and facilitates accurate client-side caching). The context also maintains user, session, and request metadata so that contextual request information (like headers) can be accessed, any writes are properly attributed to the correct user, and any additional security checks can be applied to the user.
+
+When using an exported resource class, the REST interface will automatically create a context for you with a transaction and request metadata, and you can pass this to other actions by simply including `this` as the context argument (second argument) to the static methods.
+
+For example, suppose we have a method to post a comment on a blog post; when this happens, we also want to update an array of comment IDs on the blog record and add the comment to a separate comment table.
We might do this:
+
+```javascript
+const { Comment } = tables;
+
+export class BlogPost extends tables.BlogPost {
+	post(comment) {
+		// add a comment record to the comment table, using this resource as the source of the context
+		Comment.put(comment, this);
+		this.comments.push(comment.id); // add the id for the record to our array of comment ids
+		// Both of these actions will be committed atomically as part of the same transaction
+	}
+}
+```
+
+Please see the [transaction documentation](./transactions) for more information on how transactions work in Harper.
+
+### Query
+
+The `get`/`search` methods accept a Query object that can be used to specify a query for data. The query is an object that has the following properties, which are all optional:
+
+#### `conditions`
+
+This is an array of objects that specify the conditions used to match records (if conditions are omitted or it is an empty array, this is a search for everything in the table). Each condition object can have the following properties:
+
+- `attribute`: Name of the property/attribute to match on.
+- `value`: The value to match.
+- `comparator`: This can specify how the value is compared. This defaults to "equals", but can also be "greater_than", "greater_than_equal", "less_than", "less_than_equal", "starts_with", "contains", "ends_with", "between", and "not_equal".
+- `conditions`: An array of conditions, which follows the same structure as above.
+- `operator`: Specifies the operator to apply to this set of conditions (`and` or `or`; this is optional and defaults to `and`).
+
+For example, a more complex query might look like:
+
+```javascript
+Table.search({
+	conditions: [
+		{ attribute: 'price', comparator: 'less_than', value: 100 },
+		{
+			operator: 'or',
+			conditions: [
+				{ attribute: 'rating', comparator: 'greater_than', value: 4 },
+				{ attribute: 'featured', value: true },
+			],
+		},
+	],
+});
+```
+
+**Chained Attributes/Properties**
+
+Chained attribute/property references can be used to search on properties within related records that are referenced by [relationship properties](../../../developers/applications/defining-schemas) (in addition to the [schema documentation](../../../developers/applications/defining-schemas), see the [REST documentation](../../../developers/rest) for more of an overview of relationships and querying). Chained property references are specified with an array, with each entry in the array being a property name for successive property references. For example, if a relationship property called `brand` has been defined that references a `Brand` table, we could search products by brand name:
+
+```javascript
+Product.search({ conditions: [{ attribute: ['brand', 'name'], value: 'Harper' }] });
+```
+
+This effectively executes a join, searching on the `Brand` table and joining results with matching records in the `Product` table. Chained array properties can be used in any condition, as well as in nested/grouped conditions. The chain of properties may also be more than two entries, allowing for multiple relationships to be traversed, effectively joining across multiple tables. An array of chained properties can also be used as the `attribute` in the `sort` property, allowing for sorting by an attribute in a referenced/joined table.
+
+#### `operator`
+
+Specifies if the conditions should be applied as an `"and"` (records must match all conditions), or as an `"or"` (records must match at least one condition). This is optional and defaults to `"and"`.
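+
+For example, a minimal sketch of a top-level `operator` (the table and attribute names are illustrative):
+
+```javascript
+// match products that are either highly rated OR featured
+Product.search({
+	operator: 'or',
+	conditions: [
+		{ attribute: 'rating', comparator: 'greater_than', value: 4 },
+		{ attribute: 'featured', value: true },
+	],
+});
+```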
+
+#### `limit`
+
+This specifies the limit of the number of records that should be returned from the query.
+
+#### `offset`
+
+This specifies the number of records that should be skipped prior to returning records in the query. This is often used with `limit` to implement "paging" of records.
+
+#### `select`
+
+This specifies the specific properties that should be included in each record that is returned. This can be an array, to specify a set of properties that should be included in the returned objects. The array can specify a `select.asArray = true` property, and the query results will return a set of arrays of values of the specified properties instead of objects; this can be used to return more compact results. Each of the elements in the array can be a property name, or can be an object with a `name` and a `select` array itself that specifies properties that should be returned by the referenced sub-object or related record. For example, a `select` can be defined:
+
+```javascript
+Table.search({ select: [ 'name', 'age' ], conditions: ...})
+```
+
+Or nested/joined properties from referenced objects can be specified; here we are including the referenced `related` records, and returning the `description` and `id` from each of the related objects:
+
+```javascript
+Table.search({ select: [ 'name', { name: 'related', select: ['description', 'id'] } ], conditions: ...})
+```
+
+The select properties can also include certain special properties:
+
+- `$id` - This will specifically return the primary key of the record (regardless of name, even if there is no defined primary key attribute for the table).
+- `$updatedtime` - This will return the last updated timestamp/version of the record (regardless of whether there is an attribute for the updated time).
+
+Alternately, the select value can be a string value, to specify that the value of the specified property should be returned for each iteration/element in the results. For example, to just return an iterator of the `id`s of the objects:
+
+```javascript
+Table.search({ select: 'id', conditions: ...})
+```
+
+#### `sort`
+
+This defines the sort order, and should be an object that can have the following properties:
+
+- `attribute`: The attribute to sort on.
+- `descending`: If true, will sort in descending order (optional and defaults to `false`).
+- `next`: Specifies the next sort order to resolve ties. This is an object that follows the same structure as `sort`.
+
+#### `explain`
+
+This will return the conditions re-ordered as Harper will execute them. Harper will estimate the number of matching records for each condition and apply the narrowest condition first.
+
+#### `enforceExecutionOrder`
+
+This will force the conditions to be executed in the order they were supplied, rather than using query estimation to re-order them.
+
+The query results are returned as an `AsyncIterable`. In order to access the elements of the query results, you must use a `for await` loop (it does _not_ return an array; you cannot access the results by index).
+
+For example, we could do a query like:
+
+```javascript
+let { Product } = tables;
+let results = Product.search({
+	conditions: [
+		{ attribute: 'rating', value: 4.5, comparator: 'greater_than' },
+		{ attribute: 'price', value: 100, comparator: 'less_than' },
+	],
+	offset: 20,
+	limit: 10,
+	select: ['id', 'name', 'price', 'rating'],
+	sort: { attribute: 'price' },
+});
+for await (let record of results) {
+	// iterate through each record in the query results
+}
+```
+
+`AsyncIterable`s can be returned from resource methods, and will be properly serialized in responses. When a query is performed, this will open/reserve a read transaction until the query results are iterated, either through your own `for await` loop or through serialization. Failing to iterate the results will leave a long-lived read transaction open, which can degrade performance (including write performance), and the transaction may eventually be aborted.
+
+### Interacting with the Resource Data Model
+
+When extending or interacting with table resources, when a resource instance is retrieved and instantiated, it will be loaded with the record data from its table. You can interact with this record through the resource instance. For any properties that have been defined in the table's schema, you can directly access or modify properties through standard property syntax. For example, let's say we defined a product schema:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+}
+```
+
+If we have extended this table class with our own `get()`, we can interact with any of these specified attributes/properties:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let name = this.name; // this is the name of the current product
+		let rating = this.rating; // this is the rating of the current product
+		this.rating = 3; // we can also modify the rating for the current instance
+		// (with a get this won't be saved by default, but will be used when serialized)
+		return super.get(query);
+	}
+}
+```
+
+Likewise, we can interact with resource instances in the same way when retrieving them through the static methods:
+
+```javascript
+let product1 = await Product.get(1);
+let name = product1.name; // this is the name of the product with a primary key of 1
+let rating = product1.rating; // this is the rating of the product with a primary key of 1
+product1.rating = 3; // modify the rating for this instance (this will not be saved without a call to update())
+```
+
+If there are additional properties on (some) products that aren't defined in the schema, we can still access them through the resource instance, but since they aren't declared, there won't be getter/setter definitions for direct property access. Instead, we can access properties with the `get(propertyName)` method and modify properties with the `set(propertyName, value)` method:
+
+```javascript
+let product1 = await Product.get(1);
+let additionalInformation = product1.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+product1.set('newProperty', 'some value'); // we can assign any properties we want with set
+```
+
+And likewise, we can do this in an instance method, although you will probably want to use super.get()/set() so you don't have to write extra logic to avoid recursion:
+
+```javascript
+export class CustomProduct extends Product {
+	get(query) {
+		let additionalInformation = super.get('additionalInformation'); // get the additionalInformation property value even though it isn't defined in the schema
+		super.set('newProperty', 'some value'); // we can assign any properties we want with set
+	}
+}
+```
+
+Note that you may also need to use `get`/`set` for properties that conflict with existing method names. For example, if your schema defines an attribute called `getId` (not recommended), you would need to access that property through `get('getId')` and `set('getId', value)`.
+
+If you want to save the changes you make, you can call the `update()` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.rating = 3;
+product1.set('newProperty', 'some value');
+product1.update(); // save both of these property changes
+```
+
+Updates are automatically saved inside modifying methods like put and post:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		this.name = data.name;
+		this.set('description', data.description);
+		// both of these changes will be saved automatically as this transaction commits
+	}
+}
+```
+
+We can also interact with properties in nested objects and arrays, following the same patterns. For example, we could define more complex types on our product:
+
+```graphql
+type Product @table {
+	id: ID @primaryKey
+	name: String
+	rating: Int
+	price: Float
+	brand: Brand
+	variations: [Variation]
+}
+type Brand {
+	name: String
+}
+type Variation {
+	name: String
+	price: Float
+}
+```
+
+We can interact with these nested properties:
+
+```javascript
+export class CustomProduct extends Product {
+	post(data) {
+		let brandName = this.brand.name;
+		let firstVariationPrice = this.variations[0].price;
+		let additionalInfoOnBrand = this.brand.get('additionalInfo'); // not defined in schema, but can still try to access the property
+		// make some changes
+		this.variations.splice(0, 1); // remove the first variation
+		this.variations.push({ name: 'new variation', price: 9.99 }); // add a new variation
+		this.brand.name = 'new brand name';
+		// all these changes will be saved
+	}
+}
+```
+
+If you need to delete a property, you can do so with the `delete` method:
+
+```javascript
+let product1 = await Product.get(1);
+product1.delete('additionalInformation');
+product1.update();
+```
+
+You can also get a "plain" object representation of a resource instance by calling `toJSON`, which will return a simple frozen object with all the properties (whether or not they are defined in the schema) as direct normal properties (note that this object can _not_ be modified; it is frozen since it belongs to a cache):
+
+```javascript
+let product1 = await Product.get(1);
+let plainObject = product1.toJSON();
+for (let key in plainObject) {
+	// can iterate through the properties of this record
+}
+```
+
+## Response Object
+
+The resource methods can return an object that will be serialized and returned as the response to the client. However, these methods can also return a `Response`-style object with `status`, `headers`, and optionally `body` or `data` properties. This allows you to have more control over the response, including setting custom headers and status codes. For example, you could return a redirect response like:
+
+```javascript
+return { status: 302, headers: { Location: '/new-location' } };
+```
+
+If you include a `body` property, this must be a string or buffer that will be returned as the response body. If you include a `data` property, this must be an object that will be serialized as the response body (using the standard content negotiation).
For example, we could return an object with a custom header:
+
+```javascript
+return { status: 200, headers: { 'X-Custom-Header': 'custom value' }, data: { message: 'Hello, World!' } };
+```
+
+### Throwing Errors
+
+You may throw errors (and leave them uncaught) from the response methods, and these will be caught and handled by the protocol handler. For REST requests/responses, this will result in an error response. By default the status code will be 500. You can assign a `statusCode` property to errors to indicate the HTTP status code that should be returned. For example:
+
+```javascript
+if (notAuthorized()) {
+	let error = new Error('You are not authorized to access this');
+	error.statusCode = 403;
+	throw error;
+}
+```
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/resources/migration.md b/site/versioned_docs/version-4.6/technical-details/reference/resources/migration.md
new file mode 100644
index 00000000..6a6899f0
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/resources/migration.md
@@ -0,0 +1,137 @@
+---
+title: Migration to Resource API version 2 (non-instance binding)
+---
+
+# Migration to Resource API version 2 (non-instance binding)
+
+The Resource API was inspired by two major design ideas: the REST architectural design and the [Active Record pattern](https://en.wikipedia.org/wiki/Active_record_pattern) (made popular by Ruby on Rails and heavily used as a pattern in many ORMs). The basic design goal of the Resource API is to integrate these concepts into a single construct that can directly map RESTful methods (specifically the "uniform interface" of HTTP) to an active record data model. However, while the active record pattern works well for the _consumption_ of data, implementing methods for endpoint definitions and caching sources as a data _provider_ can be confusing and cumbersome. The updated non-instance binding Resource API is designed to make it easier and more consistent to implement a data provider and interact with records across a table, while maintaining more explicit control over what data is loaded and when.
+
+The updated Resource API is enabled on a per-class basis by setting the static `loadAsInstance` property to `false`. When this property is set to `false`, Resource instances will not be bound to a specific record. Instead, instances represent the whole table, capturing the context and current transactional state. Any records in the table can be loaded or modified from `this` instance. There are a number of implications and different behaviors for a Resource class with `static loadAsInstance = false`:
+
+- The `get` method (both static and instance) will directly return the record, a frozen enumerable object with direct properties, instead of a Resource instance.
+- When instance methods are called, there will not be any record preloaded beforehand, and the resource instance will not have properties mapped to a record.
+- All instance methods accept a `target`, an instance of `RequestTarget`, as the first argument, which identifies the target record or query.
+  - The `target` will have an `id` property identifying the target resource, along with other target information.
+  - The `getId()` method is no longer used and will return `undefined`.
+  - The `target` will provide access to query parameters, search operators, and other directives.
+  - A `checkPermissions` property on the `target` indicates that a method should check permissions before proceeding. The default instance methods provide the default authorization behavior.
+  - This supplants the need for the `allowRead`, `allowUpdate`, `allowCreate`, and `allowDelete` methods, which shouldn't need to be used (and don't provide the id of the target record).
+- Any data from a POST, PUT, or PATCH request will be available in the second argument. This reverses the order of the arguments to `put`, `post`, and `patch` compared to the legacy Resource API.
+- Context is tracked using asynchronous context tracking, and will automatically be available to calls to other resources. This can be disabled by setting `static explicitContext = true`, which can improve performance.
+- The `update` method will return an `Updatable` object (instead of a Resource instance), which provides properties mapped to a record; these properties can be updated, and changes will be saved when the transaction is committed.
+
+The following are examples of how to migrate to the non-instance binding Resource API.
+
+Previous code with a `get` method:
+
+```javascript
+export class MyData extends tables.MyData {
+	async get(query) {
+		let id = this.getId(); // get the id
+		if (query?.size > 0) {
+			// check the number of query parameters
+			let idWithQuery = id + query.toString(); // add query parameters
+			let resource = await tables.MyData.get(idWithQuery, this); // retrieve another record
+			resource.newProperty = 'value'; // assign a new value to the returned resource instance
+			return resource;
+		} else {
+			this.newProperty = 'value'; // assign a new value to this instance
+			return super.get(query);
+		}
+	}
+}
+```
+
+Updated code:
+
+```javascript
+export class MyData extends tables.MyData {
+	static loadAsInstance = false; // opt in to updated behavior
+	async get(target) {
+		let id = target.id; // get the id
+		let record;
+		if (target.size > 0) {
+			// check the number of query parameters
+			let idWithQuery = target.toString(); // this is the full target with the path and query parameters
+			// we can retrieve another record from this table directly with this.get/super.get or with tables.MyData.get
+			record = await super.get(idWithQuery);
+		} else {
+			record = await super.get(target); // we can just directly use the target as well
+		}
+		// the record itself is frozen, but we can copy/assign to a new object with additional properties if we want
+		return { ...record, newProperty: 'value' };
+	}
+}
+```
+
+Here is an example of the preferred approach for authorization.
+
+Previous code with a `get` method:
+
+```javascript
+export class MyData extends tables.MyData {
+	allowRead(user) {
+		// allow any authenticated user
+		return user ? true : false;
+	}
+	async get(query) {
+		// any get logic
+		return super.get(query);
+	}
+}
+```
+
+Updated code:
+
+```javascript
+export class MyData extends tables.MyData {
+	static loadAsInstance = false; // opt in to updated behavior
+	async get(target) {
+		// While you can still use allowRead, it is not called before get is called, and it is generally encouraged
+		// to perform/call authorization explicitly in direct get, put, post methods rather than using allow* methods.
+		if (!this.getContext().user) throw new Error('Unauthorized');
+		target.checkPermissions = false; // authorization is complete, no need to check permissions further below
+		// if target.checkPermissions is set to true or left in place, the default get method will perform the default permission checks
+		return super.get(target); // we can just directly use the target as well
+	}
+}
+```
+
+Here is an example of how to convert/upgrade an implementation of a `post` method.
+
+Previous code with a `post` method:
+
+```javascript
+export class MyData extends tables.MyData {
+	async post(data, query) {
+		let resource = await tables.MyData.get(data.id, this);
+		if (resource) {
+			// update a property
+			resource.someProperty = 'value';
+			// or
+			tables.MyData.patch(data.id, { someProperty: 'value' }, this);
+		} else {
+			// create a new record
+			MyData.create(data, this);
+		}
+	}
+}
+```
+
+Updated code:
+
+```javascript
+export class MyData extends tables.MyData {
+	static loadAsInstance = false; // opt in to updated behavior
+	// IMPORTANT: the arguments are reversed:
+	async post(target, data) {
+		let record = await this.get(data.id);
+		if (record) {
+			// update a property
+			const updatable = await this.update(data.id); // we can alternately pass a target to update
+			updatable.someProperty = 'value';
+			// or
+			this.patch(data.id, { someProperty: 'value' });
+		} else {
+			// create a new record
+			this.create(data);
+		}
+	}
+}
+```
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/storage-algorithm.md b/site/versioned_docs/version-4.6/technical-details/reference/storage-algorithm.md
new file mode 100644
index 00000000..99525536
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/storage-algorithm.md
@@ -0,0 +1,27 @@
+---
+title: Storage Algorithm
+---
+
+# Storage Algorithm
+
+The Harper storage algorithm is fundamental to the Harper core functionality, enabling the [Dynamic Schema](./dynamic-schema) and all other user-facing functionality. Harper is built on top of Lightning Memory-Mapped Database (LMDB), a key-value store offering industry-leading performance and functionality, which allows our storage algorithm to store data in tables as rows/objects. This document provides additional details on how data is stored within Harper.
+
+## Query Language Agnostic
+
+The Harper storage algorithm was designed to abstract the data storage from any individual query language. Harper currently supports both SQL and NoSQL on top of this storage algorithm, with the ability to add additional query languages in the future. This means data can be inserted via NoSQL and read via SQL while hitting the same underlying data storage.
+
+## ACID Compliant
+
+Utilizing Multi-Version Concurrency Control (MVCC) through LMDB, Harper offers ACID compliance independently on each node. Readers and writers operate independently of each other, meaning readers don't block writers and writers don't block readers. Each Harper table has a single writer process, avoiding deadlocks and assuring that writes are executed in the order in which they were received. Harper tables can have multiple reader processes operating at the same time for consistent, high-scale reads.
+
+## Universally Indexed
+
+All top-level attributes are automatically indexed immediately upon ingestion. The [Harper Dynamic Schema](./dynamic-schema) reflexively creates both the attribute and its index as new schema metadata comes in.
Indexes are agnostic of datatype, honoring the following order: booleans, numbers ordered naturally, strings ordered lexically. Within the LMDB implementation, table records are grouped together into a single LMDB environment file, where each attribute index is a sub-database (dbi) inside said environment file. An example of the indexing scheme can be seen below.
+
+## Additional LMDB Benefits
+
+Harper inherits both functional and performance benefits by implementing LMDB as the underlying key-value store. Data is memory-mapped, which enables quick data access without data duplication. All writers are fully serialized, making writes deadlock-free. LMDB is built to maximize operating system features and functionality, fully exploiting the buffer cache and built to run in CPU cache. To learn more about LMDB, visit their documentation.
+
+## Harper Indexing Example (Single Table)
+
+![](/img/v4.6/reference/HarperDB-3.0-Storage-Algorithm.png.webp)
diff --git a/site/versioned_docs/version-4.6/technical-details/reference/transactions.md b/site/versioned_docs/version-4.6/technical-details/reference/transactions.md
new file mode 100644
index 00000000..11a8f4dc
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/reference/transactions.md
@@ -0,0 +1,40 @@
+---
+title: Transactions
+---
+
+# Transactions
+
+Transactions are an important part of robust handling of data in data-driven applications. Harper provides ACID-compliant support for transactions, allowing for guaranteed atomic, consistent, and isolated data handling within transactions, with durability guarantees on commit. Understanding how transactions are tracked and behave is important for properly leveraging transactional support in Harper. For most operations this is very intuitive: each HTTP request is executed in a transaction, so when multiple actions are executed in a single request, they are normally automatically included in the same transaction.
+
+Transactions span a database. Once a read snapshot is started, it is an atomic snapshot of all the tables in a database, and writes that span multiple tables in the database will all be committed atomically together (no writes in one table will be visible before writes in another table in the same database). If a transaction is used to access or write data in multiple databases, there will actually be a separate database transaction used for each database, and there is no guarantee of atomicity between separate transactions in separate databases. This can be an important consideration when deciding if and how tables should be organized into different databases.
+
+Because Harper is designed to be a low-latency distributed database, locks are avoided in data handling, so transactions do not lock data. When a transaction starts, it will provide a read snapshot of the database for any retrievals or queries, which means all reads will be performed on a single version of the database, isolated from any other writes that are concurrently taking place. Within a transaction, all writes are aggregated and atomically written on commit. These writes are all isolated (from other transactions) until committed, and all become visible atomically. However, because transactions are non-locking, it is possible that writes from other transactions may occur between when reads are performed and when the writes are committed (at which point the last write will win for any records that have been written concurrently).
Support for locks in transactions is planned for a future release.
+
+Transactions can also be explicitly started using the `transaction` global function that is provided in the Harper environment:
+
+## `transaction(context?, callback: (transaction) => any): Promise`
+
+This executes the callback in a transaction, providing a context that can be used for any resource methods that are called. This returns a promise for when the transaction has been committed. The callback itself may be asynchronous (return a promise), allowing for asynchronous activity within the transaction. This is useful for starting a transaction when your code is not already running within a transaction (in an HTTP request handler, a transaction will typically already be started). For example, if we wanted to run an action on a timer that periodically loads data, we could ensure that the data is loaded in a single transaction like this (note that Harper is multi-threaded, so for a timer-based job we very likely want it to run in only one thread):
+
+```javascript
+import { tables } from 'harperdb';
+import { isMainThread } from 'worker_threads';
+
+const { MyTable } = tables;
+if (isMainThread) // only on the main thread
+	setInterval(async () => {
+		let someData = await (await fetch(/* ... some URL ... */)).json();
+		await transaction(async (txn) => {
+			for (let item of someData) {
+				await MyTable.put(item, txn);
+			}
+		});
+	}, 3600000); // every hour
+```
+
+You can provide your own context object for the transaction to attach to. If you call `transaction` with a context that already has a transaction started, it will simply use the current transaction, execute the callback, and immediately return (this can be useful for ensuring that a transaction has started).
+
+Once the transaction callback is completed (for non-nested transaction calls), the transaction will commit, and if the callback throws an error, the transaction will abort. However, the callback is called with the `transaction` object, which also provides the following methods and property:
+
+- `commit(): Promise` - Commits the current transaction. The transaction will be committed once the returned promise resolves.
+- `abort(): void` - Aborts the current transaction and resets it.
+- `resetReadSnapshot(): void` - Resets the read snapshot for the transaction, resetting to the latest data in the database.
+- `timestamp: number` - This is the timestamp associated with the current transaction.
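+
+For example, here is a minimal sketch of explicitly aborting a transaction when a validation check fails (the `Order` table, its fields, and the ids are illustrative):
+
+```javascript
+await transaction(async (txn) => {
+	let order = await tables.Order.get('order-1', txn);
+	if (!order) {
+		txn.abort(); // discard the transaction; nothing will be committed
+		return;
+	}
+	await tables.Order.patch('order-1', { status: 'shipped' }, txn);
+	// otherwise the transaction commits automatically when the callback completes
+});
+```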
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/index.md b/site/versioned_docs/version-4.6/technical-details/release-notes/index.md
new file mode 100644
index 00000000..d0bb0c74
--- /dev/null
+++ b/site/versioned_docs/version-4.6/technical-details/release-notes/index.md
@@ -0,0 +1,269 @@
+---
+title: Release Notes
+---
+
+# Release Notes
+
+### Current Release
+
+[Meet Tucker](./v4-tucker/tucker) Our 4th Release Pup
+
+[4.6.2 Tucker](./v4-tucker/4.6.2)
+
+[4.6.1 Tucker](./v4-tucker/4.6.1)
+
+[4.6.0 Tucker](./v4-tucker/4.6.0)
+
+[4.5.14 Tucker](./v4-tucker/4.5.14)
+
+[4.5.13 Tucker](./v4-tucker/4.5.13)
+
+[4.5.12 Tucker](./v4-tucker/4.5.12)
+
+[4.5.11 Tucker](./v4-tucker/4.5.11)
+
+[4.5.10 Tucker](./v4-tucker/4.5.10)
+
+[4.5.9 Tucker](./v4-tucker/4.5.9)
+
+[4.5.8 Tucker](./v4-tucker/4.5.8)
+
+[4.5.7 Tucker](./v4-tucker/4.5.7)
+
+[4.5.6 Tucker](./v4-tucker/4.5.6)
+
+[4.5.5 Tucker](./v4-tucker/4.5.5)
+
+[4.5.4 Tucker](./v4-tucker/4.5.4)
+
+[4.5.3 Tucker](./v4-tucker/4.5.3)
+
+[4.5.2 Tucker](./v4-tucker/4.5.2)
+
+[4.5.1 Tucker](./v4-tucker/4.5.1)
+
+[4.5.0 Tucker](./v4-tucker/4.5.0)
+
+[4.4.24 Tucker](./v4-tucker/4.4.24)
+
+[4.4.23 Tucker](./v4-tucker/4.4.23)
+
+[4.4.22 Tucker](./v4-tucker/4.4.22)
+
+[4.4.21 Tucker](./v4-tucker/4.4.21)
+
+[4.4.20 Tucker](./v4-tucker/4.4.20)
+
+[4.4.19 Tucker](./v4-tucker/4.4.19)
+
+[4.4.18 Tucker](./v4-tucker/4.4.18)
+
+[4.4.17 Tucker](./v4-tucker/4.4.17)
+
+[4.4.16 Tucker](./v4-tucker/4.4.16)
+
+[4.4.15 Tucker](./v4-tucker/4.4.15)
+
+[4.4.14 Tucker](./v4-tucker/4.4.14)
+
+[4.4.13 Tucker](./v4-tucker/4.4.13)
+
+[4.4.12 Tucker](./v4-tucker/4.4.12)
+
+[4.4.11 Tucker](./v4-tucker/4.4.11)
+
+[4.4.10 Tucker](./v4-tucker/4.4.10)
+
+[4.4.9 Tucker](./v4-tucker/4.4.9)
+
+[4.4.8 Tucker](./v4-tucker/4.4.8)
+
+[4.4.7 Tucker](./v4-tucker/4.4.7)
+
+[4.4.6 Tucker](./v4-tucker/4.4.6)
+
+[4.4.5 Tucker](./v4-tucker/4.4.5)
+
+[4.4.4 Tucker](./v4-tucker/4.4.4)
+
+[4.4.3 Tucker](./v4-tucker/4.4.3)
+
+[4.4.2 Tucker](./v4-tucker/4.4.2)
+
+[4.4.1 Tucker](./v4-tucker/4.4.1)
+
+[4.4.0 Tucker](./v4-tucker/4.4.0)
+
+[4.3.38 Tucker](./v4-tucker/4.3.38)
+
+[4.3.37 Tucker](./v4-tucker/4.3.37)
+
+[4.3.36 Tucker](./v4-tucker/4.3.36)
+
+[4.3.35 Tucker](./v4-tucker/4.3.35)
+
+[4.3.34 Tucker](./v4-tucker/4.3.34)
+
+[4.3.33 Tucker](./v4-tucker/4.3.33)
+
+[4.3.32 Tucker](./v4-tucker/4.3.32)
+
+[4.3.31 Tucker](./v4-tucker/4.3.31)
+
+[4.3.30 Tucker](./v4-tucker/4.3.30)
+
+[4.3.29 Tucker](./v4-tucker/4.3.29)
+
+[4.3.28 Tucker](./v4-tucker/4.3.28)
+
+[4.3.27 Tucker](./v4-tucker/4.3.27)
+
+[4.3.26 Tucker](./v4-tucker/4.3.26)
+
+[4.3.25 Tucker](./v4-tucker/4.3.25)
+
+[4.3.24 Tucker](./v4-tucker/4.3.24)
+
+[4.3.23 Tucker](./v4-tucker/4.3.23)
+
+[4.3.22 Tucker](./v4-tucker/4.3.22)
+
+[4.3.21 Tucker](./v4-tucker/4.3.21)
+
+[4.3.20 Tucker](./v4-tucker/4.3.20)
+
+[4.3.19 Tucker](./v4-tucker/4.3.19)
+
+[4.3.18 Tucker](./v4-tucker/4.3.18)
+
+[4.3.17 Tucker](./v4-tucker/4.3.17)
+
+[4.3.16 Tucker](./v4-tucker/4.3.16)
+
+[4.3.15 Tucker](./v4-tucker/4.3.15)
+
+[4.3.14 Tucker](./v4-tucker/4.3.14)
+
+[4.3.13 Tucker](./v4-tucker/4.3.13)
+
+[4.3.12 Tucker](./v4-tucker/4.3.12)
+
+[4.3.11 Tucker](./v4-tucker/4.3.11)
+
+[4.3.10 Tucker](./v4-tucker/4.3.10)
+
+[4.3.9 Tucker](./v4-tucker/4.3.9)
+
+[4.3.8 Tucker](./v4-tucker/4.3.8)
+
+[4.3.7 Tucker](./v4-tucker/4.3.7)
+
+[4.3.6 Tucker](./v4-tucker/4.3.6)
+
+[4.3.5 Tucker](./v4-tucker/4.3.5)
+
+[4.3.4 Tucker](./v4-tucker/4.3.4)
+
+[4.3.3 Tucker](./v4-tucker/4.3.3) + +[4.3.2 Tucker](./v4-tucker/4.3.2) + +[4.3.1 Tucker](./v4-tucker/4.3.1) + +[4.3.0 Tucker](./v4-tucker/4.3.0) + +[4.2.8 Tucker](./v4-tucker/4.2.8) + +[4.2.7 Tucker](./v4-tucker/4.2.7) + +[4.2.6 Tucker](./v4-tucker/4.2.6) + +[4.2.5 Tucker](./v4-tucker/4.2.5) + +[4.2.4 Tucker](./v4-tucker/4.2.4) + +[4.2.3 Tucker](./v4-tucker/4.2.3) + +[4.2.2 Tucker](./v4-tucker/4.2.2) + +[4.2.1 Tucker](./v4-tucker/4.2.1) + +[4.2.0 Tucker](./v4-tucker/4.2.0) + +[4.1.2 Tucker](./v4-tucker/4.1.2) + +[4.1.1 Tucker](./v4-tucker/4.1.1) + +[4.1.0 Tucker](./v4-tucker/4.1.0) + +[4.0.7 Tucker](./v4-tucker/4.0.7) + +[4.0.6 Tucker](./v4-tucker/4.0.6) + +[4.0.5 Tucker](./v4-tucker/4.0.5) + +[4.0.4 Tucker](./v4-tucker/4.0.4) + +[4.0.3 Tucker](./v4-tucker/4.0.3) + +[4.0.2 Tucker](./v4-tucker/4.0.2) + +[4.0.1 Tucker](./v4-tucker/4.0.1) + +[4.0.0 Tucker](./v4-tucker/4.0.0) + +### Past Releases + +[Meet Monkey](./v3-monkey/) Our 3rd Release Pup + +[3.2.1 Monkey](./v3-monkey/3.2.1) + +[3.2.0 Monkey](./v3-monkey/3.2.0) + +[3.1.5 Monkey](./v3-monkey/3.1.5) + +[3.1.4 Monkey](./v3-monkey/3.1.4) + +[3.1.3 Monkey](./v3-monkey/3.1.3) + +[3.1.2 Monkey](./v3-monkey/3.1.2) + +[3.1.1 Monkey](./v3-monkey/3.1.1) + +[3.1.0 Monkey](./v3-monkey/3.1.0) + +[3.0.0 Monkey](./v3-monkey/3.0.0) + +--- + +[Meet Penny](./v2-penny/) Our 2nd Release Pup + +[2.3.1 Penny](./v2-penny/2.3.1) + +[2.3.0 Penny](./v2-penny/2.3.0) + +[2.2.3 Penny](./v2-penny/2.2.3) + +[2.2.2 Penny](./v2-penny/2.2.2) + +[2.2.0 Penny](./v2-penny/2.2.0) + +[2.1.1 Penny](./v2-penny/2.1.1) + +--- + +[Meet Alby](./v1-alby/) Our 1st Release Pup + +[1.3.1 Alby](./v1-alby/1.3.1) + +[1.3.0 Alby](./v1-alby/1.3.0) + +[1.2.0 Alby](./v1-alby/1.2.0) + +[1.1.0 Alby](./v1-alby/1.1.0) diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.1.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.1.0.md new file mode 100644 index 00000000..2256a825 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.1.0.md @@ -0,0 +1,72 @@ +--- +title: 1.1.0 +sidebar_position: 89899 +--- + +### HarperDB 1.1.0, Alby Release + +4/18/2018 + +**Features** + +- Users & Roles: + - Limit/Assign access to all HarperDB operations + + - Limit/Assign access to schemas, tables & attributes + + - Limit/Assign access to specific SQL operations (`INSERT`, `UPDATE`, `DELETE`, `SELECT`) + +- Enhanced SQL parser + - Added extensive ANSI SQL Support. + - Added Array function, which allows for converting relational data into Object/Hierarchical data + - `Distinct_Array` Function: allows for removing duplicates in the Array function. + - Enhanced SQL Validation: Improved validation around structure of SQL, validating the schema, etc.. + - 10x performance improvement on SQL statements. + +- Export Function: can now call a NoSQL/SQL search and have it export to CSV or JSON. + +- Added upgrade function to CLI + +- Added ability to perform bulk update from CSV + +- Created landing page for HarperDB. + +- Added CORS support to HarperDB + +**Fixes** + +- Fixed memory leak in CSV bulk loads + +- Corrected error when attempting to perform a `SQL DELETE` + +- Added further validation to NoSQL `UPDATE` to validate schema & table exist + +- Fixed install issue occurring when part of the install path does not exist, the install would silently fail. 
+ +- Fixed issues with replicated data when one of the replicas is down + +- Removed logging of initial user’s credentials during install + +- Can now use reserved words as aliases in SQL + +- Removed user(s) password in results when calling `list_users` + +- Corrected forwarding of operations to other nodes in a cluster + +- Corrected lag in schema meta-data passing to other nodes in a cluster + +- Drop table & schema now move the table & schema or table to the trash folder under the Database folder for later permanent deletion. + +- Bulk inserts no longer halt the entire operation if n records already exist, instead the return includes the hashes of records that have been skipped. + +- Added ability to accept EULA from command line + +- Corrected `search_by_value` not searching on the correct attribute + +- Added ability to increase the timeout of a request by adding `SERVER_TIMEOUT_MS` to config/settings.js + +- Add error handling resulting from SQL calculations. + +- Standardized error responses as JSON. + +- Corrected internal process generation to not allow more processes than machine has cores. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.2.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.2.0.md new file mode 100644 index 00000000..a504a7ad --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.2.0.md @@ -0,0 +1,42 @@ +--- +title: 1.2.0 +sidebar_position: 89799 +--- + +### HarperDB 1.2.0, Alby Release + +7/10/2018 + +**Features** + +- Time to Live: Conserve the resources of your edge device by setting data on devices to live for a specific period of time. +- Geo: HarperDB has implemented turf.js into its SQL parser to enable geo based analytics. +- Jobs: CSV Data loads, Exports & Time to Live now all run as back ground jobs. +- Exports: Perform queries that export into JSON or CSV and save to disk or S3. + +**Fixes** + +- Fixed issue where CSV data loads incorrectly report number of records loaded. +- Added validation to stop `BETWEEN` operations in SQL. +- Updated logging to not include internal variables in the logs. +- Cleaned up `add_role` response to not include internal variables. +- Removed old and unused dependencies. +- Build out further unit tests and integration tests. +- Fixed https to handle certificates properly. +- Improved stability of clustering & replication. +- Corrected issue where Objects and Arrays were not casting properly in `SQL SELECT` response. +- Fixed issue where Blob text was not being returned from `SQL SELECT`s. +- Fixed error being returned when querying on table with no data, now correctly returns empty array. +- Improved performance in SQL when searching on exact values. +- Fixed error when ./harperdb stop is called. +- Fixed logging issue causing instability in installer. +- Fixed `read_log` operation to accept date time. +- Added permissions checking to `export_to_s3`. +- Added ability to run SQL on `SELECT` without a `FROM`. +- Fixed issue where updating a user’s password was not encrypting properly. +- Fixed `user_guide.html` to point to readme on git repo. +- Created option to have HarperDB run as a foreground process. +- Updated `user_info` to return the correct role for a user. +- Fixed issue where HarperDB would not stop if the database root was deleted. +- Corrected error message on insert if an invalid schema is provided. +- Added permissions checks for user & role operations. 
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.3.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.3.0.md new file mode 100644 index 00000000..e3a5215f --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.3.0.md @@ -0,0 +1,27 @@ +--- +title: 1.3.0 +sidebar_position: 89699 +--- + +### HarperDB 1.3.0, Alby Release + +11/2/2018 + +**Features** + +- Upgrade: Upgrade to newest version via command line. +- SQL Support: Added `IS NULL` for SQL parser. +- Added attribute validation to search operations. + +**Fixes** + +- Fixed `SELECT` calculations, i.e. `SELECT` 2+2. +- Fixed select OR not returning expected results. +- No longer allowing reserved words for schema and table names. +- Corrected process interruptions from improper SQL statements. +- Improved message handling between spawned processes that replace killed processes. +- Enhanced error handling for updates to tables that do not exist. +- Fixed error handling for NoSQL responses when `get_attributes` is provided with invalid attributes. +- Fixed issue with new columns not being updated properly in update statements. +- Now validating roles, tables and attributes when creating or updating roles. +- Fixed an issue where in some cases `undefined` was being returned after dropping a role diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.3.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.3.1.md new file mode 100644 index 00000000..56927389 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/1.3.1.md @@ -0,0 +1,29 @@ +--- +title: 1.3.1 +sidebar_position: 89698 +--- + +### HarperDB 1.3.1, Alby Release + +2/26/2019 + +**Features** + +- Clustering connection direction appointment +- Foundations for threading/multi processing +- UUID autogen for hash attributes that were not provided +- Added cluster status operation + +**Bug Fixes and Enhancements** + +- More logging +- Clustering communication enhancements +- Clustering queue ordering by timestamps +- Cluster re connection enhancements +- Number of system core(s) detection +- Node LTS (10.15) compatibility +- Update/Alter users enhancements +- General performance enhancements +- Warning is logged if different versions of harperdb are connected via clustering +- Fixed need to restart after user creation/alteration +- Fixed SQL error that occurred on selecting from an empty table diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/_category_.json b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/_category_.json new file mode 100644 index 00000000..e33195ec --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Alby (Version 1)", + "position": -1 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/index.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/index.md new file mode 100644 index 00000000..0aa3a2c4 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v1-alby/index.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Alby (Version 1) +--- + +# HarperDB Alby (Version 1) + +Did you know our release names are dedicated to employee pups? For our first release, Alby was our pup. 
+ +Here is a bit about Alby: + +![picture of black dog](/img/v4.6/dogs/alby.webp) + +_Hi, I am Alby. My mom is Kaylan Stock, Director of Marketing at HarperDB. I am a 9-year-old Great Dane mix who loves sun bathing, going for swims, and wreaking havoc on the local squirrels. My favorite snack is whatever you are eating, and I love a good butt scratch!_ diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.1.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.1.1.md new file mode 100644 index 00000000..c59337d7 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.1.1.md @@ -0,0 +1,28 @@ +--- +title: 2.1.1 +sidebar_position: 79898 +--- + +### HarperDB 2.1.1, Penny Release + +05/22/2020 + +**Highlights** + +- CORE-1007 Added the ability to perform `SQL INSERT` & `UPDATE` with function calls & expressions on values. +- CORE-1023 Fixed minor bug in final SQL step incorrectly trying to translate ordinals to alias in `ORDER BY` statement. +- CORE-1020 Fixed bug allowing 'null' and 'undefined' string values to be passed in as valid hash values. +- CORE-1006 Added SQL functionality that enables `JOIN` statements across different schemas. +- CORE-1005 Implemented JSONata library to handle our JSON document search functionality in SQL, creating the `SEARCH_JSON` function. +- CORE-1009 Updated schema validation to allow all printable ASCII characters to be used in schema/table/attribute names, except, forward slashes and backticks. Same rules apply now for hash attribute values. +- CORE-1003 Fixed handling of ORDER BY statements with function aliases. +- CORE-1004 Fixed bug related to `SELECT*` on `JOIN` queries with table columns with the same name. +- CORE-996 Fixed an issue where the `transact_to_cluster` flag is lost for CSV URL loads, fixed an issue where new attributes created in CSV bulk load do not sync to the cluster. +- CORE-994 Added new operation `system_information`. This operation returns info & metrics for the OS, time, memory, cpu, disk, network. +- CORE-993 Added new custom date functions for AlaSQL & UTC updates. +- CORE-991 Changed jobs to spawn a new process which will run the intended job without impacting a main HarperDB process. +- CORE-992 HTTPS enabled by default. +- CORE-990 Updated `describe_table` to add the record count for the table for LMDB data storage. +- CORE-989 Killed the socket cluster processes prior to HarperDB processes to eliminate a false uptime. +- CORE-975 Updated time values set by SQL Date Functions to be in epoch format. +- CORE-974 Added date functions to `SQL SELECT` column alias functionality. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.0.md new file mode 100644 index 00000000..a669ca8b --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.0.md @@ -0,0 +1,44 @@ +--- +title: 2.2.0 +sidebar_position: 79799 +--- + +### HarperDB 2.2.0, Penny Release + +08/24/2020 + +**Features/Updates** + +- CORE-997 Updated the data format for CSV data loads being sync'd across a cluster to take up less resources +- CORE-1018 Adds SQL functionality for `BETWEEN` statements +- CORE-1032 Updates permissions to allow regular users (i.e. 
non-super users) to call the `get_job` operation +- CORE-1036 On create/drop table we auto create/drop the related transactions environments for the schema.table +- CORE-1042 Built raw functions to write to a tables transaction log for insert/update/delete operations +- CORE-1057 Implemented write transaction into lmdb create/update/delete functions +- CORE-1048 Adds `SEARCH` wildcard handling for role permissions standards +- CORE-1059 Added config setting to disable transaction logging for an instance +- CORE-1076 Adds permissions filter to describe operations +- CORE-1043 Change clustering catchup to use the new transaction log +- CORE-1052 Removed word "master" from source +- CORE-1061 Added new operation called `delete_transactions_before` this will tail a transaction log for a specific schema / table +- CORE-1040 On HarperDB startup make sure all tables have a transaction environment +- CORE-1055 Added 2 new setting to change the server headersTimeout & keepAliveTimeout from the config file +- CORE-1044 Created new operation `read_transaction_log` which will allow a user to get transactions for a table by `timestamp`, `username`, or `hash_value` +- CORE-1043 Change clustering catchup to use the new transaction log +- CORE-1089 Added new attribute to `system_information` for table/transaction log data size in bytes & transaction log record count +- CORE-1101 Fix to store empty strings rather than considering them null & fix to be able to search on empty strings in SQL/NoSQL. +- CORE-1054 Updates permissions object to remove delete attribute permission and update table attribute permission key to `attribute_permissions` +- CORE-1092 Do not allow the `__createdtime__` to be updated +- CORE-1085 Updates create schema/table & drop schema/table/attribute operations permissions to require super user role and adds integration tests to validate +- CORE-1071 Updates response messages and status codes from `describe_schema` and `describe_table` operations to provide standard language/status code when a schema item is not found +- CORE-1049 Updates response message for SQL update op with no matching rows +- CORE-1096 Added tracking of the origin in the transaction log. This origin object stores the node name, timestamp of the transaction from the originating node & the user. + +**Bug Fixes** + +- CORE-1028 Fixes bug for simple `SQL SELECT` queries not returning aliases and incorrectly returning hash values when not requested in query +- CORE-1037 Fixed an issue where numbers with leading zero i.e. 00123 are converted to numbers rather than being honored as strings. +- CORE-1063 Updates permission error response shape to consolidate issues into individual objects per schema/table combo +- CORE-1098 Fixed an issue where transaction environments were remaining in the global cache after being dropped. +- CORE-1086 Fixed issue where responses from insert/update were incorrect with skipped records. 
+- CORE-1079 Fixes SQL bugs around invalid schema/table and special characters in `WHERE` clause diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.2.md new file mode 100644 index 00000000..fca00967 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.2.md @@ -0,0 +1,17 @@ +--- +title: 2.2.2 +sidebar_position: 79797 +--- + +### HarperDB 2.2.2, Penny Release + +10/27/2020 + +- CORE-1154 Allowed transaction logging to be disabled even if clustering is enabled. +- CORE-1153 Fixed issue where `delete_files_before` was writing to transaction log. +- CORE-1152 Fixed issue where no more than 4 HarperDB forks would be created. +- CORE-1112 Adds handling for system timestamp attributes in permissions. +- CORE-1131 Adds better handling for checking perms on operations with action value in JSON. +- CORE-1113 Fixes validation bug checking for super user/cluster user permissions and other permissions. +- CORE-1135 Adds validation for valid keys in role API operations. +- CORE-1073 Adds new `import_from_s3` operation to API. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.3.md new file mode 100644 index 00000000..06b89d4e --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.2.3.md @@ -0,0 +1,10 @@ +--- +title: 2.2.3 +sidebar_position: 79796 +--- + +### HarperDB 2.2.3, Penny Release + +11/16/2020 + +- CORE-1158 Performance improvements to core delete function and configuration of `delete_files_before` to run in batches with a pause into between. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.3.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.3.0.md new file mode 100644 index 00000000..a027eedb --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.3.0.md @@ -0,0 +1,23 @@ +--- +title: 2.3.0 +sidebar_position: 79699 +--- + +### HarperDB 2.3.0, Penny Release + +12/03/2020 + +**Features/Updates** + +- CORE-1191, CORE-1190, CORE-1125, CORE-1157, CORE-1126, CORE-1140, CORE-1134, CORE-1123, CORE-1124, CORE-1122 Added JWT Authentication option (See documentation for more information) +- CORE-1128, CORE-1143, CORE-1140, CORE-1129 Added `upsert` operation +- CORE-1187 Added `get_configuration` operation which allows admins to view their configuration settings. +- CORE-1175 Added new internal LMDB function to copy an environment for use in future features. +- CORE-1166 Updated packages to address security vulnerabilities. + +**Bug Fixes** + +- CORE-1195 Modified `drop_attribute` to drop after data cleanse completes. +- CORE-1149 Fix SQL bug regarding self joins and updates alasql to 0.6.5 release. +- CORE-1168 Fix inconsistent invalid schema/table errors. +- CORE-1162 Fix bug which caused `delete_files_before` to cause tables to grow in size due to an open cursor issue. 
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.3.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.3.1.md new file mode 100644 index 00000000..03df0186 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/2.3.1.md @@ -0,0 +1,13 @@ +--- +title: 2.3.1 +sidebar_position: 79698 +--- + +### HarperDB 2.3.1, Penny Release + +1/29/2021 + +**Bug Fixes** + +- CORE-1218 A bug in HarperDB 2.3.0 was identified related to manually calling the `create_attribute` operation. This bug caused secondary indexes to be overwritten by the most recently inserted or updated value for the index, thereby causing a search operation filtered with that index to only return the most recently inserted/updated row. Note, this issue does not affect attributes that are reflexively/automatically created. It only affects attributes created using `create_attribute`. To resolve this issue in 2.3.0 or earlier, drop and recreate your table using reflexive attribute creation. In 2.3.1, drop and recreate your table and use either reflexive attribute creation or `create_attribute`. +- CORE-1219 Increased maximum table attributes from 1000 to 10000 diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/_category_.json b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/_category_.json new file mode 100644 index 00000000..285eecf7 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Penny (Version 2)", + "position": -2 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/index.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/index.md new file mode 100644 index 00000000..b9a59b47 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v2-penny/index.md @@ -0,0 +1,13 @@ +--- +title: HarperDB Penny (Version 2) +--- + +# HarperDB Penny (Version 2) + +Did you know our release names are dedicated to employee pups? For our second release, Penny was the star. + +Here is a bit about Penny: + +![picture of brindle dog](/img/v4.6/dogs/penny.webp) + +_Hi I am Penny! My dad is Kyle Bernhardy, the CTO of HarperDB. I am a nine-year-old Whippet who lives for running hard and fast while exploring the beautiful terrain of Colorado. My favorite activity is chasing birds along with afternoon snoozes in a sunny spot in my backyard._ diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.0.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.0.0.md new file mode 100644 index 00000000..10319747 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.0.0.md @@ -0,0 +1,32 @@ +--- +title: 3.0.0 +sidebar_position: 69999 +--- + +### HarperDB 3.0, Monkey Release + +5/18/2021 + +**Features/Updates** + +- CORE-1217, CORE-1226, CORE-1232 Create new `search_by_conditions` operation. +- CORE-1304 Upgrade to Node 12.22.1. +- CORE-1235 Adds new upgrade/install functionality. +- CORE-1206, CORE-1248, CORE-1252 Implement `lmdb-store` library for optimized performance. +- CORE-1062 Added alias operation for `delete_files_before`, named `delete_records_before`. +- CORE-1243 Change `HTTPS_ON` settings value to false by default. 
+- CORE-1189 Implement fastify web server, resulting in improved performance. +- CORE-1221 Update user API to use role name instead of role id. +- CORE-1225 Updated dependencies to eliminate npm security warnings. +- CORE-1241 Adds 3.0 update directive and refactors/fixes update functionality. + +**Bug Fixes** + +- CORE-1299 Remove all references to the `PROJECT_DIR` setting. This setting is problematic when using node version managers and upgrading the version of node and then installing a new instance of HarperDB. +- CORE-1288 Fix bug with drop table/schema that was causing 'env required' error log. +- CORE-1285 Update warning log when trying to create an attribute that already exists. +- CORE-1254 Added logic to manage data collisions in clustering. +- CORE-1212 Add pre-check to `drop_user` that returns error if user doesn't exist. +- CORE-1114 Update response code and message from `add_user` when user already exists. +- CORE-1111 Update response from `create_attribute` to match the create schema/table response. +- CORE-1205 Fixed bug that prevented schema/table from being dropped if name was a number or had a wildcard value in it. Updated validation for insert, upsert and update. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.0.md new file mode 100644 index 00000000..f14acb8e --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.0.md @@ -0,0 +1,24 @@ +--- +title: 3.1.0 +sidebar_position: 69899 +--- + +### HarperDB 3.1.0, Monkey Release + +8/24/2021 + +**Features/Updates** + +- CORE-1320, CORE-1321, CORE-1323, CORE-1324 Version 1.0 of HarperDB Custom Functions +- CORE-1275, CORE-1276, CORE-1278, CORE-1279, CORE-1280, CORE-1282, CORE-1283, CORE-1305, CORE-1314 IPC server for communication between HarperDB processes, including HarperDB, HarperDB Clustering, and HarperDB Functions +- CORE-1352, CORE-1355, CORE-1356, CORE-1358 Implement pm2 for HarperDB process management +- CORE-1292, CORE-1308, CORE-1312, CORE-1334, CORE-1338 Updated installation process to start HarperDB immediately on install and to accept all config settings via environment variable or command line arguments +- CORE-1310 Updated licensing functionality +- CORE-1301 Updated validation for performance improvement +- CORE-1359 Add `hdb-response-time` header which returns the HarperDB response time in milliseconds +- CORE-1330, CORE-1309 New config settings: `LOG_TO_FILE`, `LOG_TO_STDSTREAMS`, `IPC_SERVER_PORT`, `RUN_IN_FOREGROUND`, `CUSTOM_FUNCTIONS`, `CUSTOM_FUNCTIONS_PORT`, `CUSTOM_FUNCTIONS_DIRECTORY`, `MAX_CUSTOM_FUNCTION_PROCESSES` + +**Bug Fixes** + +- CORE-1315 Corrected issue in HarperDB restart scenario +- CORE-1370 Update some of the validation error handlers so that they don't log full stack diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.1.md new file mode 100644 index 00000000..8f90dc10 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.1.md @@ -0,0 +1,19 @@ +--- +title: 3.1.1 +sidebar_position: 69898 +--- + +### HarperDB 3.1.1, Monkey Release + +9/23/2021 + +**Features/Updates** + +- CORE-1393 Added utility function to add settings from env/cmd vars to the settings file on every run/restart +- CORE-1395 Create a setting which will allow to enable the local Studio to 
be served from an instance of HarperDB +- CORE-1397 Update the stock 404 response to not return the request URL +- General updates to optimize Docker container + +**Bug Fixes** + +- CORE-1399 Added fixes for complex SQL alias issues diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.2.md new file mode 100644 index 00000000..706e5956 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.2.md @@ -0,0 +1,16 @@ +--- +title: 3.1.2 +sidebar_position: 69897 +--- + +### HarperDB 3.1.2, Monkey Release + +10/21/2021 + +**Features/Updates** + +- Updated the installation ASCII art to reflect the new HarperDB logo + +**Bug Fixes** + +- CORE-1408 Corrects issue where `drop_attribute` was not properly setting the LMDB version number causing tables to behave unexpectedly diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.3.md new file mode 100644 index 00000000..1a7d3301 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.3.md @@ -0,0 +1,12 @@ +--- +title: 3.1.3 +sidebar_position: 69896 +--- + +### HarperDB 3.1.3, Monkey Release + +1/14/2022 + +**Bug Fixes** + +- CORE-1446 Fix for scans on indexes larger than 1 million entries causing queries to never return diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.4.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.4.md new file mode 100644 index 00000000..3fa86ead --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.4.md @@ -0,0 +1,12 @@ +--- +title: 3.1.4 +sidebar_position: 69895 +--- + +### HarperDB 3.1.4, Monkey Release + +2/24/2022 + +**Features/Updates** + +- CORE-1460 Added new setting `STORAGE_WRITE_ASYNC`. If this setting is true, LMDB will have faster write performance at the expense of not being crash safe. The default for this setting is false, which results in HarperDB being crash safe. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.5.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.5.md new file mode 100644 index 00000000..23661928 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.1.5.md @@ -0,0 +1,12 @@ +--- +title: 3.1.5 +sidebar_position: 69894 +--- + +### HarperDB 3.1.5, Monkey Release + +3/4/2022 + +**Features/Updates** + +- CORE-1498 Fixed incorrect autocasting of string that start with "0." that tries to convert to number but instead returns NaN. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.2.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.2.0.md new file mode 100644 index 00000000..fa215082 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.2.0.md @@ -0,0 +1,14 @@ +--- +title: 3.2.0 +sidebar_position: 69799 +--- + +### HarperDB 3.2.0, Monkey Release + +3/25/2022 + +**Features/Updates** + +- CORE-1391 Bug fix related to orphaned HarperDB background processes. +- CORE-1509 Updated node version check, updated Node.js version, updated project dependencies. +- CORE-1518 Remove final call from logger. 
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.2.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.2.1.md new file mode 100644 index 00000000..4cc983a4 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.2.1.md @@ -0,0 +1,12 @@ +--- +title: 3.2.1 +sidebar_position: 69798 +--- + +### HarperDB 3.2.1, Monkey Release + +6/1/2022 + +**Features/Updates** + +- CORE-1573 Added logic to track the pid of the foreground process if running in foreground. Then on stop, use that pid to kill the process. Logic was also added to kill the pm2 daemon when stop is called. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.3.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.3.0.md new file mode 100644 index 00000000..236704dd --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/3.3.0.md @@ -0,0 +1,12 @@ +--- +title: 3.3.0 +sidebar_position: 69699 +--- + +### HarperDB 3.3.0 - Monkey + +- CORE-1595 Added new role type `structure_user`, this enables non-superusers to be able to create/drop schema/table/attribute. +- CORE-1501 Improved performance for drop_table. +- CORE-1599 Added two new operations for custom functions `install_node_modules` & `audit_node_modules`. +- CORE-1598 Added `skip_node_modules` flag to `package_custom_function_project` operation. This flag allows for not bundling project dependencies and deploying a smaller project to other nodes. Use this flag in tandem with `install_node_modules`. +- CORE-1707 Binaries are now included for Linux on AMD64, Linux on ARM64, and macOS. GCC, Make, Python are no longer required when installing on these platforms. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/_category_.json b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/_category_.json new file mode 100644 index 00000000..0103ac36 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Monkey (Version 3)", + "position": -3 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/index.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/index.md new file mode 100644 index 00000000..a446b5c3 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v3-monkey/index.md @@ -0,0 +1,11 @@ +--- +title: HarperDB Monkey (Version 3) +--- + +# HarperDB Monkey (Version 3) + +Did you know our release names are dedicated to employee pups? For our third release, we have Monkey. + +![picture of tan dog](/img/v4.6/dogs/monkey.webp) + +_Hi, I am Monkey, a.k.a. Monk, a.k.a. Monchichi. My dad is Aron Johnson, the Director of DevOps at HarperDB. I am an eight-year-old Australian Cattle dog mutt whose favorite pastime is hunting and collecting tennis balls from the park next to her home. 
I love burrowing in the Colorado snow, rolling in the cool grass on warm days, and cheese!_ diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.0.md new file mode 100644 index 00000000..7a3b86bb --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.0.md @@ -0,0 +1,131 @@ +--- +title: 4.0.0 +sidebar_position: 59999 +--- + +### HarperDB 4.0.0, Tucker Release + +11/2/2022 + +**Networking & Data Replication (Clustering)** + +The HarperDB clustering internals have been rewritten and the underlying technology for Clustering has been completely replaced with [NATS](https:/nats.io/), an enterprise grade connective technology responsible for addressing, discovery and exchanging of messages that drive the common patterns in distributed systems. + +- CORE-1464, CORE-1470, : Remove SocketCluster dependencies and all code related to them. +- CORE-1465, CORE-1485, CORE-1537, CORE-1538, CORE-1558, CORE-1583, CORE_1665, CORE-1710, CORE-1801, CORE-1865 :Add nats-`server` code as dependency, on install of HarperDB download nats-`server` is possible else fallback to building from source code. +- CORE-1593, CORE-1761: Add `nats.js` as project dependency. +- CORE-1466: Build NATS configs on `harperdb run` based on HarperDB YAML configuration. +- CORE-1467, CORE-1508: Launch and manage NATS servers with PM2. +- CORE-1468, CORE-1507: Create a process which reads the work queue stream and processes transactions. +- CORE-1481, CORE-1529, CORE-1698, CORE-1502, CORE-1696: On upgrade to 4.0, update pre-existing clustering configurations, create table transaction streams, create work queue stream, update `hdb_nodes` table, create clustering folder structure, and rebuild self-signed certs. +- CORE-1494, CORE-1521, CORE-1755: Build out internals to interface with NATS. +- CORE-1504: Update existing hooks to save transactions to work with NATS. +- CORE-1514, CORE-1515, CORE-1516, CORE-1527, CORE-1532: Update `add_node`, `update_node`, and `remove_node` operations to no longer need host and port in payload. These operations now manage dynamically sourcing of table level transaction streams between nodes and work queues. +- CORE-1522: Create `NATSReplyService` process which handles the receiving NATS based requests from remote instances and sending back appropriate responses. +- CORE-1471, CORE-1568, CORE-1563, CORE-1534, CORE-1569: Update `cluster_status` operation. +- CORE-1611: Update pre-existing transaction log operations to be audit log operations. +- CORE-1541, CORE-1612, CORE-1613: Create translation log operations which interface with streams. +- CORE-1668: Update NATS serialization / deserialization to use MessagePack. +- CORE-1673: Add `system_info` param to `hdb_nodes` table and update on `add_node` and `cluster_status`. +- CORE-1477, CORE-1493, CORE-1557, CORE-1596, CORE-1577: Both a full HarperDB restart & just clustering restart call the NATS server with a reload directive to maintain full uptime while servers refresh. +- CORE-1474:HarperDB install adds clustering folder structure. +- CORE-1530: Post `drop_table` HarperDB purges the related transaction stream. +- CORE-1567: Set NATS config to always use TLS. +- CORE-1543: Removed the `transact_to_cluster` attribute from the bulk load operations. Now bulk loads always replicate. 
+- CORE-1533, CORE-1556, CORE-1561, CORE-1562, CORE-1564: New operation `configure_cluster`, this operation enables bulk publishing and subscription of multiple tables to multiple instances of HarperDB. +- CORE-1535: Create work queue stream on install of HarperDB. This stream receives transactions from remote instances of HarperDB which are then ingested in order. +- CORE-1551: Create transaction streams on the remote node if they do not exist when performing `add_node` or `update_node`. +- CORE-1594, CORE-1605, CORE-1749, CORE-1767, CORE-1770: Optimize the work queue stream and its consumer to be more performant and validate exact once delivery. +- CORE-1621, CORE-1692, CORE-1570, CORE-1693: NATS stream names are MD5 hashed to avoid characters that HarperDB allows, but NATS may not. +- CORE-1762: Add a new optional attribute to `add_node` and `update_node` named `opt_start_time`. This attribute sets a starting time to start synchronizing transactions. +- CORE-1785: Optimizations and bug fixes in regards to sourcing data from remote instances on HarperDB. +- CORE-1588: Created new operation `set_cluster_routes` to enable setting routes for instances of HarperDB to mesh together. +- CORE-1589: Created new operation `get_cluster_routes` to allow for retrieval of routes used to connect the instance of HarperDB to the mesh. +- CORE-1590: Created new operation `delete_cluster_routes` to allow for removal of routes used to connect the instance of HarperDB to the mesh. +- CORE-1667: Fix old environment variable `CLUSTERING_PORT` not mapping to new hub server port. +- CORE-1609: Allow `remove_node` to be called when the other node cannot be reached. +- CORE-1815: Add transaction lock to `add_node` and `update_node` to avoid concurrent nats source update bug. +- CORE-1848: Update stream configs if the node name has been changed in the YAML configuration. +- CORE-1873: Update `add_node` and `update_node` so that it auto-creates schema/table on both local and remote node respectively + +**Data Storage** + +We have made improvements to how we store, index, and retrieve data. + +- CORE-1619: Enabled new concurrent flushing technology for improved write performance. +- CORE-1701: Optimize search performance for `search_by_conditions` when executing multiple AND conditions. +- CORE-1652: Encode the values of secondary indices more efficiently for faster access. +- CORE-1670: Store updated timestamp in `lmdb.js`' version property. +- CORE-1651: Enabled multiple value indexing of array values which allows for the ability to search on specific elements in an array more efficiently. +- CORE-1649, CORE-1659: Large text values (larger than 255 bytes) are no longer stored in separate blob index. Now they are segmented and delimited in the same index to increase search performance. +- Complex objects and object arrays are no longer stored in a separate index to preserve storage and increase write throughput. +- CORE-1650, CORE-1724, CORE-1738: Improved internals around interpreting attribute values. +- CORE-1657: Deferred property decoding allows large objects to be stored, but individual attributes can be accessed (like with get_attributes) without incurring the cost of decoding the entire object. +- CORE-1658: Enable in-memory caching of records for even faster access to frequently accessed data. +- CORE-1693: Wrap updates in async transactions to ensure ACID-compliant updates. +- CORE-1653: Upgrade to 4.0 rebuilds tables to reflect changes made to index improvements. 
+- CORE-1753: Removed old `node-lmdb` dependency. +- CORE-1787: Freeze objects returned from queries. +- CORE-1821: Read the `WRITE_ASYNC` setting which enables LMDB nosync. + +**Logging** + +HarperDB has increased logging specificity by breaking out logs based on components logging. There are specific log files each for HarperDB Core, Custom Functions, Hub Server, Leaf Server, and more. + +- CORE-1497: Remove `pino` and `winston` dependencies. +- CORE-1426: All logging is output via `stdout` and `stderr`, our default logging is then picked up by PM2 which handles writing out to file. +- CORE-1431: Improved `read_log` operation validation. +- CORE-1433, CORE-1463: Added log rotation. +- CORE-1553, CORE-1555, CORE-1552, CORE-1554, CORE-1704: Performance gain by only serializing objects and arrays if the log is for the level defined in configuration. +- CORE-1436: Upgrade to 4.0 updates internals for logging changes. +- CORE-1428, CORE-1440, CORE-1442, CORE-1434, CORE-1435, CORE-1439, CORE-1482, CORE-1751, CORE-1752: Bug fixes, performance improvements and improved unit tests. +- CORE-1691: Convert non-PM2 managed log file writes to use Node.js `fs.appendFileSync` function. + +**Configuration** + +HarperDB has updated its configuration from a properties file to YAML. + +- CORE-1448, CORE-1449, CORE-1519, CORE-1587: Upgrade automatically converts the pre-existing settings file to YAML. +- CORE-1445, CORE-1534, CORE-1444, CORE-1858: Build out new logic to create, update, and interpret the YAML configuration file. +- Installer has updated prompts to reflect YAML settings. +- CORE-1447: Create an alias for the `configure_cluster` operation as `set_configuration`. +- CORE-1461, CORE-1462, CORE-1483: Unit test improvements. +- CORE-1492: Improvements to get_configuration and set_configuration operations. +- CORE-1503: Modify HarperDB configuration for more granular certificate definition. +- CORE-1591: Update `routes` IP param to `host` and to `leaf` config in `harperdb.conf` +- CORE-1519: Fix issue when switching between old and new versions of HarperDB we are getting the config parameter is undefined error on npm install. + +**Broad NodeJS and Platform Support** + +- CORE-1624: HarperDB can now run on multiple versions of NodeJS, from v14 to v19. We primarily test on v18, so that is the preferred version. + +**Windows 10 and 11** + +- CORE-1088: HarperDB now runs natively on Windows 10 and 11 without the need to run in a container or installed in WSL. Windows is only intended for evaluation and development purposes, not for production work loads. + +**Extra Changes and Bug Fixes** + +- CORE-1520: Refactor installer to remove all waterfall code and update to use Promises. +- CORE-1573: Stop the PM2 daemon and any logging processes when stopping hdb. +- CORE-1586: When HarperDB is running in foreground stop any additional logging processes from being spawned. +- CORE-1626: Update docker file to accommodate new `harperdb.conf` file. +- CORE-1592, CORE-1526, CORE-1660, CORE-1646, CORE-1640, CORE-1689, CORE-1711, CORE-1601, CORE-1726, CORE-1728, CORE-1736, CORE-1735, CORE-1745, CORE-1729, CORE-1748, CORE-1644, CORE-1750, CORE-1757, CORE-1727, CORE-1740, CORE-1730, CORE-1777, CORE-1778, CORE-1782, CORE-1775, CORE-1771, CORE-1774, CORE-1759, CORE-1772, CORE-1861, CORE-1862, CORE-1863, CORE-1870, CORE-1869:Changes for CI/CD pipeline and integration tests. +- CORE-1661: Fixed issue where old boot properties file caused an error when attempting to install 4.0.0. 
+- CORE-1697, CORE-1814, CORE-1855: Upgrade fastify dependency to new major version 4. +- CORE-1629: Jobs are now running as processes managed by the PM2 daemon. +- CORE-1733: Update LICENSE to reflect our EULA on our site. +- CORE-1606: Enable Custom Functions by default. +- CORE-1714: Include pre-built binaries for most common platforms (darwin-arm64, darwin-x64, linux-arm64, linux-x64, win32-x64). +- CORE-1628: Fix issue where setting license through environment variable not working. +- CORE-1602, CORE-1760, CORE-1838, CORE-1839, CORE-1847, CORE-1773: HarperDB Docker container improvements. +- CORE-1706: Add support for encoding HTTP responses with MessagePack. +- CORE-1709: Improve the way lmdb.js dependencies are installed. +- CORE-1758: Remove/update unnecessary HTTP headers. +- CORE-1756: On `npm install` and `harperdb install` change the node version check from an error to a warning if the installed Node.js version does not match our preferred version. +- CORE-1791: Optimizations to authenticated user caching. +- CORE-1794: Update README to discuss Windows support & Node.js versions +- CORE-1837: Fix issue where Custom Function directory was not being created on install. +- CORE-1742: Add more validation to audit log - check schema/table exists and log is enabled. +- CORE-1768: Fix issue where when running in foreground HarperDB process is not stopping on `harperdb stop`. +- CORE-1864: Fix to semver checks on upgrade. +- CORE-1850: Fix issue where a `cluster_user` type role could not be altered. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.1.md new file mode 100644 index 00000000..2a85f511 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.1.md @@ -0,0 +1,13 @@ +--- +title: 4.0.1 +sidebar_position: 59998 +--- + +### HarperDB 4.0.1, Tucker Release + +01/20/2023 + +**Bug Fixes** + +- CORE-1992 Local studio was not loading because the path got mangled in the build. +- CORE-2001 Fixed deploy_custom_function_project after node update broke it. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.2.md new file mode 100644 index 00000000..bedbd970 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.2.md @@ -0,0 +1,13 @@ +--- +title: 4.0.2 +sidebar_position: 59997 +--- + +### HarperDB 4.0.2, Tucker Release + +01/24/2023 + +**Bug Fixes** + +- CORE-2003 Fix bug where if machine had one core thread config would default to zero. +- Update to lmdb 2.7.3 and msgpackr 1.7.0 diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.3.md new file mode 100644 index 00000000..ad1cbf8a --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.3.md @@ -0,0 +1,12 @@ +--- +title: 4.0.3 +sidebar_position: 59996 +--- + +### HarperDB 4.0.3, Tucker Release + +01/26/2023 + +**Bug Fixes** + +- CORE-2007 Add update nodes 4.0.0 launch script to build script to fix clustering upgrade. 
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.4.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.4.md new file mode 100644 index 00000000..3f052465 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.4.md @@ -0,0 +1,12 @@ +--- +title: 4.0.4 +sidebar_position: 59995 +--- + +### HarperDB 4.0.4, Tucker Release + +01/27/2023 + +**Bug Fixes** + +- CORE-2009 Fixed bug where add node was not being called when upgrading clustering. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.5.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.5.md new file mode 100644 index 00000000..1696d6d4 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.5.md @@ -0,0 +1,14 @@ +--- +title: 4.0.5 +sidebar_position: 59994 +--- + +### HarperDB 4.0.5, Tucker Release + +02/15/2023 + +**Bug Fixes** + +- CORE-2029 Improved the upgrade process for handling existing user TLS certificates and correctly configuring TLS settings. Added a prompt to upgrade to determine if new certificates should be created or existing certificates should be kept/used. +- Fix the way NATS connections are honored in a local environment. +- Do not define the certificate authority path to NATS if it is not defined in the HarperDB config. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.6.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.6.md new file mode 100644 index 00000000..1cdc1bd7 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.6.md @@ -0,0 +1,12 @@ +--- +title: 4.0.6 +sidebar_position: 59993 +--- + +### HarperDB 4.0.6, Tucker Release + +03/09/2023 + +**Bug Fixes** + +- Fixed a data serialization error that occurs when a large number of different record structures are persisted in a single table. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.7.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.7.md new file mode 100644 index 00000000..c4d1fbbf --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.0.7.md @@ -0,0 +1,12 @@ +--- +title: 4.0.7 +sidebar_position: 59992 +--- + +### HarperDB 4.0.7, Tucker Release + +03/10/2023 + +**Bug Fixes** + +- Update lmdb.js dependency diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.0.md new file mode 100644 index 00000000..17e3fd08 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.0.md @@ -0,0 +1,63 @@ +--- +title: 4.1.0 +sidebar_position: 59899 +--- + +# 4.1.0 + +HarperDB 4.1 introduces the ability to use worker threads for concurrently handling HTTP requests. Previously this was handled by processes. This shift provides important benefits in terms of better control of traffic delegation with support for optimized load tracking and session affinity, better debuggability, and reduced memory footprint. + +This means debugging will be much easier for custom functions. 
If you install/run HarperDB locally, most modern IDEs like WebStorm and VSCode support worker thread debugging, so you can start HarperDB in your IDE, and set breakpoints in your custom functions and debug them. + +The associated routing functionality now includes session affinity support. This can be used to consistently route users to the same thread which can improve caching locality, performance, and fairness. This can be enabled in with the [`http.sessionAffinity` option in your configuration](../../../deployments/configuration#http). + +HarperDB 4.1's NoSQL query handling has been revamped to consistently use iterators, which provide an extremely memory efficient mechanism for directly streaming query results to the network _as_ the query results are computed. This results in faster Time to First Byte (TTFB) (only the first record/value in a query needs to be computed before data can start to be sent), and less memory usage during querying (the entire query result does not need to be stored in memory). These iterators are also available in query results for custom functions and can provide means for custom function code to iteratively access data from the database without loading entire results. This should be a completely transparent upgrade, all HTTP APIs function the same, with the one exception that custom functions need to be aware that they can't access query results by `[index]` (they should use array methods or for-in loops to handle query results). + +4.1 includes configuration options for specifying the location of database storage files. This allows you to specifically locate database directories and files on different volumes for better flexibility and utilization of disks and storage volumes. See the [storage configuration](../../../deployments/configuration#storage) and [schemas configuration](../../../deployments/configuration#schemas) for information on how to configure these locations. + +Logging has been revamped and condensed into one `hdb.log` file. See [logginglogging for more information. + +A new operation called `cluster_network` was added, this operation will ping the cluster and return a list of enmeshed nodes. + +Custom Functions will no longer automatically load static file routes, instead the `@fastify/static` plugin will need to be registered with the Custom Function server. See [Host A Static Web UI-static](https:/docs.harperdb.io/docs/v/4.1/custom-functions/host-static). + +Updates to S3 import and export mean that these operations now require the bucket `region` in the request. Also, if referencing a nested object it should be done in the `key` parameter. See examples [here](../../../developers/operations-api/bulk-operations#import-from-s3). + +Due to the AWS SDK v2 reaching end of life support we have updated to v3. This has caused some breaking changes in our operations `import_from_s3` and `export_to_s3`: + +- A new attribute `region` will need to be supplied +- The `bucket` attribute can no longer have trailing slashes. Slashes will now need to be in the `key`. + +Starting HarperDB without any command (just `harperdb`) now runs HarperDB like a standard process, in the foreground. This means you can use standard unix tooling for interacting with the process and is conducive for running HarperDB with systemd or any other process management tool. If you wish to have HarperDB launch itself in separate background process (and immediately terminate the shell process), you can do so by running `harperdb start`. 
+ +Internal Tickets completed: + +- CORE-609 - Ensure that attribute names are always added to global schema as Strings +- CORE-1549 - Remove fastify-static code from Custom Functions server which auto serves content from "static" folder +- CORE-1655 - Iterator based queries +- CORE-1764 - Fix issue where describe_all operation returns an empty object for non super-users if schema(s) do not yet have table(s) +- CORE-1854 - Switch to using worker threads instead of processes for handling concurrency +- CORE-1877 - Extend the csv_url_load operation to allow for additional headers to be passed to the remote server when the csv is being downloaded +- CORE-1893 - Add last updated timestamp to describe operations +- CORE-1896 - Fix issue where Select \* from system.hdb_info returns wrong HDB version number after Instance Upgrade +- CORE-1904 - Fix issue when executing GEOJSON query in SQL +- CORE-1905 - Add HarperDB YAML configuration setting which defines the storage location of NATS streams +- CORE-1906 - Add HarperDB YAML configuration setting defining the storage location of tables. +- CORE-1655 - Streaming binary format serialization +- CORE-1943 - Add configuration option to set mount point for audit tables +- CORE-1921 - Update NATS transaction lifecycle to handle message deduplication in work queue streams. +- CORE-1963 - Update logging for better readability, reduced duplication, and request context information. +- CORE-1968 - In server\nats\natsIngestService.js remove the js_msg.working(); line to improve performance. +- CORE-1976 - Fix error when calling describe_table operation with no schema or table defined in payload. +- CORE-1983 - Fix issue where create_attribute operation does not validate request for required attributes +- CORE-2015 - Remove PM2 logs that get logged in console when starting HDB +- CORE-2048 - systemd script for 4.1 +- CORE-2052 - Include thread information in system_information for visibility of threads +- CORE-2061 - Add a better error msg when clustering is enabled without a cluster user set +- CORE-2068 - Create new log rotate logic since pm2 log-rotate no longer used +- CORE-2072 - Update to Node 18.15.0 +- CORE-2090 - Upgrade Testing from v4.0.x and v3.x to v4.1. +- CORE-2091 - Run the performance tests +- CORE-2092 - Allow for automatic patch version updates of certain packages +- CORE-2109 - Add verify option to clustering TLS configuration +- CORE-2111 - Update AWS SDK to v3 diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.1.md new file mode 100644 index 00000000..54163b63 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.1.md @@ -0,0 +1,15 @@ +--- +title: 4.1.1 +sidebar_position: 59898 +--- + +# 4.1.1 + +06/16/2023 + +- HarperDB uses improved logic for determining default heap limits and thread counts. When running in a restricted container and on NodeJS 18.15+, HarperDB will use the constrained memory limit to determine heap limits for each thread. In more memory constrained servers with many CPU cores, a reduced default thread count will be used to ensure that excessive memory is not used by many workers. You may still define your own thread count (with `http`/`threads`) in the [configuration](../../../deployments/configuration). 
+- An option has been added for [disabling the republishing of NATS messages](../../../deployments/configuration), which can provide improved replication performance in a fully connected network. +- Improvements to our OpenShift container. +- Dependency security updates. +- **Bug Fixes** +- Fixed a bug in reporting database metrics in the `system_information` operation. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.2.md new file mode 100644 index 00000000..fc5e16f4 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.1.2.md @@ -0,0 +1,13 @@ +--- +title: 4.1.2 +sidebar_position: 59897 +--- + +### HarperDB 4.1.2, Tucker Release + +06/16/2023 + +- HarperDB has updated binary dependencies to support older glibc versions back to 2.17. +- A new CLI command was added to get the current status of whether HarperDB is running and the cluster status. This is available with `harperdb status`. +- Improvements to our OpenShift container. +- Dependency security updates. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.0.md new file mode 100644 index 00000000..cf51217b --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.0.md @@ -0,0 +1,99 @@ +--- +title: 4.2.0 +sidebar_position: 59799 +--- + +# 4.2.0 + +#### HarperDB 4.2.0 + +HarperDB 4.2 introduces a new interface for accessing our core database engine with faster access, well-typed idiomatic JavaScript interfaces, ergonomic object mapping, and real-time data subscriptions. 4.2 has also adopted a new component architecture for building extensions to deliver customized external data sources, authentication, file handlers, content types, and more. These architectural upgrades lead to several key new HarperDB capabilities, including a new REST interface, advanced caching, and real-time messaging and publish/subscribe functionality through MQTT, WebSockets, and Server-Sent Events. + +4.2 also introduces configurable database schemas, using GraphQL Schema syntax. The new component structure is also configuration-driven, providing easy, low-code paths to building applications. [Check out our new getting started guide](../../../getting-started) to see how easy it is to get started with HarperDB apps. + +### Resource API + +The [Resource API](../../reference/resources) is the new interface for accessing data in HarperDB. It utilizes a uniform interface for accessing data in HarperDB databases/tables and is designed to easily be implemented or extended for defining customized application logic for table access or defining custom external data sources. This API has support for connecting resources together for caching and delivering data change and message notifications in real-time. The [Resource API documentation details this interface](../../reference/resources). + +### Component Architecture + +HarperDB's custom functions have evolved towards a full component architecture; our internal functionality is defined as components, and these can be used in a modular way in conjunction with user components. These can all easily be configured and loaded through configuration files, and there is now a well-defined interface for creating your own components.
Components can easily be deployed/installed into HarperDB using NPM and GitHub references as well. + +### Configurable Database Schemas + +HarperDB applications or components support [schema definitions using GraphQL schema syntax](../../../../developers/applications/defining-schemas). This makes it easy to define your table and attribute structure and gives you control over which attributes should be indexed and what types they should be. With schemas in configuration, these schemas can be bundled with an application and deployed together with application code. + +### REST Interface + +HarperDB 4.2 introduces a new REST interface for accessing data through best-practice HTTP APIs using intuitive paths and standards-based methods and headers that directly map to our Resource API. This new interface provides fast and easy access to data via queries through GET requests, modifications of data through PUTs, customized actions through POSTs, and more. With standards-based header support built in, this works seamlessly with external caches (including browser caches) for accelerated performance and reduced network transfers. + +### Real-Time + +HarperDB 4.2 now provides standard interfaces for subscribing to data changes and receiving notifications of changes and messages in real-time. Using these new real-time messaging capabilities with structured data provides a powerful integrated platform for both database-style data updates and querying along with message delivery. [Real-time messaging](../../../../developers/real-time) of data is available through several protocols: + +#### MQTT + +4.2 now includes MQTT support, a publish/subscribe messaging protocol designed to be efficient enough for even small Internet of Things devices. This allows clients to connect to HarperDB and publish messages through our data center and subscribe to messages and data for real-time delivery. 4.2 implements support for QoS 0 and 1, along with durable sessions. + +#### WebSockets + +HarperDB now also supports WebSockets. This can be used as a transport for MQTT or as a connection for custom connection handling. + +#### Server-Sent Events + +HarperDB also includes support for Server-Sent Events. This is a very easy-to-use browser API that allows websites/applications to connect to HarperDB and subscribe to data changes with minimal effort over standard HTTP. + +### Database Structure + +HarperDB databases contain a collection of tables, and these tables are now contained in a single transactionally-consistent database file. This means reads and writes can be performed transactionally and atomically across tables (as long as they are in the same database). Multi-table transactions are replicated as single atomic transactions as well. Audit logs are also maintained in the same database with atomic consistency. + +Databases are now entirely encapsulated in a file, which means they can be moved/copied to another HarperDB instance without requiring any separate metadata updates in the system tables. + +### Clone Node + +HarperDB includes new functionality for adding new HarperDB nodes to a cluster. New instances can be configured to clone a leader node, copying a database snapshot from the leader and self-configuring from it as well, to facilitate accelerated deployment of new nodes and fast horizontal scaling to meet demand.
[See the documentation on Clone Node for more information.](../../../../administration/cloning) + +### Operations API terminology updates + +Any operation that used the `schema` property was updated to make this property optional and alternately support `database` as the property for specifying the database (formerly 'schema'). If both `schema` and `database` are absent, the operation defaults to using the `data` database. The term 'primary key' is now used in place of 'hash', and the NoSQL operation `search_by_hash` has been renamed to `search_by_id`. + +Support was added for defining a table with `primary_key` instead of `hash_attribute`. + +## Configuration + +There have been significant changes to `harperdb-config.yaml`; however, none of these changes should affect configurations from pre-4.2 versions. If you upgrade to 4.2, any existing configuration should be backwards compatible and will not need to be updated. + +`harperdb-config.yaml` has had some configuration values added, removed, and renamed, and some defaults changed. Please refer to [harperdb-config.yaml](../../../deployments/configuration) for the most current configuration parameters. + +- The `http` element has been expanded. + - `compressionThreshold` was added. + - All `customFunction` configuration now lives here, except for the `tls` section. +- `threads` has moved out of the `http` element and now is its own top level element. +- `authentication` section was moved out of the `operationsApi` section and is now its own top level element/section. +- `analytics.aggregatePeriod` was added. +- Default logging level was changed to `warn`. +- Default clustering log level was changed to `info`. +- `clustering.republishMessages` now defaults to `false`. +- `operationsApi.foreground` was removed. To start HarperDB in the foreground, from the CLI run `harperdb`. +- Made `operationsApi` configuration optional. Any config not defined here will default to the `http` section. +- Added a `securePort` parameter to `operationsApi` and `http` used for setting the HTTPS port. +- Added a new top level `tls` section. +- Removed `customFunctions.enabled`, `customFunctions.network.https`, `operationsApi.network.https` and `operationsApi.nodeEnv`. +- Added an element called `componentRoot` which replaces `customFunctions.root`. +- Updated custom pathing to use `databases` instead of `schemas`. +- Added `logging.auditAuthEvents.logFailed` and `logging.auditAuthEvents.logSuccessful` for enabling logging of auth events. +- A new `mqtt` section was added. + +### Socket Management + +HarperDB now uses socket sharing to distribute incoming connections to different threads (`SO_REUSEPORT`). This is considered to be the most performant mechanism available for multi-threaded socket handling. This does mean that we have deprecated session-affinity based socket delegation. + +HarperDB now also supports more flexible port configurations: application endpoints and WebSockets run on port 9926 by default, but these can be separated, or application endpoints can be configured to run on the same port as the operations API for a single-port configuration. + +### Sessions + +HarperDB now supports cookie-based sessions for authentication for web clients. This can be used with the standard authentication mechanisms to log in, and then cookies can be used to preserve the authenticated session. This is generally a more secure way of maintaining authentication in browsers, without having to rely on storing credentials.
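As a rough sketch of this flow (the endpoint path, credentials, and cookie name shown are illustrative assumptions, not the exact API): the first request authenticates with standard credentials, the response establishes a session cookie, and subsequent requests send only the cookie.

```http
POST /app/login HTTP/1.1
Authorization: Basic aGRiX2FkbWluOnNlY3JldA==

HTTP/1.1 200 OK
Set-Cookie: hdb-session=abc123; HttpOnly; Secure

GET /Product/1 HTTP/1.1
Cookie: hdb-session=abc123
```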
+ +### Dev Mode + +HarperDB can now directly run a HarperDB application from any location using `harperdb run /path/to/app` or `harperdb dev /path/to/app`. The latter starts in dev mode, with logging directly to the console, debugging enabled, and auto-restarting with any changes in your application files. Dev mode is recommended for local application and component development. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.1.md new file mode 100644 index 00000000..c792a637 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.1.md @@ -0,0 +1,14 @@ +--- +title: 4.2.1 +sidebar_position: 59798 +--- + +### HarperDB 4.2.1, Tucker Release + +11/3/2023 + +- Downgrade NATS 2.10.3 back to 2.10.1 due to regression in connection handling. +- Handle package names with underscores. +- Improved validation of queries and comparators +- Avoid double replication on transactions with multiple commits +- Added file metadata on get_component_file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.2.md new file mode 100644 index 00000000..9cfa957e --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.2.md @@ -0,0 +1,16 @@ +--- +title: 4.2.2 +sidebar_position: 59797 +--- + +### HarperDB 4.2.2, Tucker Release + +11/8/2023 + +- Increase timeouts for NATS connections. +- Fix for database snapshots for backups (and for clone node). +- Fix application of permissions for default tables exposed through REST. +- Log replication failures with record information. +- Fix application of authorization/permissions for MQTT commands. +- Fix copying of local components in clone node. +- Fix calculation of overlapping start time in clone node. 
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.3.md new file mode 100644 index 00000000..edecd686 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.3.md @@ -0,0 +1,14 @@ +--- +title: 4.2.3 +sidebar_position: 59796 +--- + +### HarperDB 4.2.3, Tucker Release + +11/15/2023 + +- When setting securePort, disable the insecure port setting on the same port +- Fix `harperdb status` when pid file is missing +- Fix/include missing icons/fonts from local studio +- Fix crash that can occur when concurrently accessing records > 16KB +- Apply a lower heap limit to better ensure that memory leaks are quickly caught/mitigated diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.4.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.4.md new file mode 100644 index 00000000..14d268b5 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.4.md @@ -0,0 +1,11 @@ +--- +title: 4.2.4 +sidebar_position: 59795 +--- + +### HarperDB 4.2.4, Tucker Release + +11/16/2023 + +- Prevent coercion of strings to numbers in SQL queries (in WHERE clause) +- Address fastify deprecation warning about accessing config diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.5.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.5.md new file mode 100644 index 00000000..1b6bf143 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.5.md @@ -0,0 +1,13 @@ +--- +title: 4.2.5 +sidebar_position: 59794 +--- + +### HarperDB 4.2.5, Tucker Release + +11/22/2023 + +- Disable compression on server-sent events to ensure messages are immediately sent (not queued for later delivery) +- Update geoNear function to tolerate null values +- lmdb-js fix to ensure prefetched keys are pinned in memory until retrieved +- Add header to indicate start of a new authenticated session (for studio to identify authenticated sessions) diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.6.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.6.md new file mode 100644 index 00000000..50abde53 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.6.md @@ -0,0 +1,11 @@ +--- +title: 4.2.6 +sidebar_position: 59793 +--- + +### HarperDB 4.2.6, Tucker Release + +11/29/2023 + +- Update various geo SQL functions to tolerate invalid values +- Properly report component installation/load errors in `get_components` (for studio to load components after an installation failure) diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.7.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.7.md new file mode 100644 index 00000000..5d75e134 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.7.md @@ -0,0 +1,12 @@ +--- +title: 4.2.7 +sidebar_position: 59792 +--- + +### HarperDB 4.2.7 + +12/6/2023 + +- Add support for cloning over the top of an existing HarperDB instance +- Add health checks for NATS consumer with ability to restart consumer loops for better resiliency +- Revert Fastify autoload module due to a regression that had caused ECMAScript modules for Fastify route
modules to fail to load on Windows diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.8.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.8.md new file mode 100644 index 00000000..21127797 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.2.8.md @@ -0,0 +1,15 @@ +--- +title: 4.2.8 +sidebar_position: 59791 +--- + +### HarperDB 4.2.8 + +12/19/2023 + +- Added support for CLI command-line arguments for clone node +- Added support for cloning a node without enabling clustering +- Clear NATS client cache on closed event +- Fix check for attribute permissions so that an empty attribute permissions array is treated as a table-level permission definition +- Improve speed of cross-node health checks +- Fix for using `database` in describe operations diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.0.md new file mode 100644 index 00000000..dc76f9ce --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.0.md @@ -0,0 +1,125 @@ +--- +title: 4.3.0 +sidebar_position: 59699 +--- + +# 4.3.0 + +#### HarperDB 4.3.0, Tucker Release + +3/19/2024 + +#### Relationships and Joins + +HarperDB now supports defining relationships between tables. These relationships can be defined as one-to-many, many-to-one, or many-to-many, and use a foreign key to record the relationship between records from different tables. An example of how to use this to define many-to-one and one-to-many relationships between a product and a brand table: + +```graphql +type Product @table { + id: ID @primaryKey + name: String @indexed + # foreign key used to reference a brand + brandId: ID @indexed + # many-to-one relationship to brand + brand: Brand @relation(from: "brandId") +} +type Brand @table { + id: ID @primaryKey + name: String @indexed + # one-to-many relationship of brand to products of that brand + products: [Product] @relation(to: "brandId") +} +``` + +This relationship model can be used in queries and selects, which will automatically "join" the data from the tables. For example, you could search for products by brand name: + +```http +/Product?brand.name=Microsoft +``` + +HarperDB also now supports querying with a sort order. Multiple sort orders can be provided to break ties. Nested selects have also been added, which also utilize joins when related records are referenced. For example: + +```http +/Product?brand.name=Microsoft&sort(price)&select(name,brand{name,size}) +``` + +See the [schema definition documentation](../../../../developers/applications/defining-schemas) for more information on defining relationships, and the [REST documentation for more information on queries](../../../../developers/rest). + +#### OpenAPI Specification + +A new default endpoint `GET /openapi` was added for describing endpoints configured through a GraphQL schema. + +#### Query Optimizations + +HarperDB has also made numerous improvements to query planning and execution for high-performance query results with a broader range of queries. + +#### Indexing Nulls + +New tables and indexes now support indexing null values, enabling queries by null (as well as queries for non-null values).
For example, you can query by nulls with the REST interface: + +```http +GET /Table/?attribute=null +``` + +Note that existing indexes will remain without null value indexing, and can only support indexing/querying by nulls if they are rebuilt (removed and re-added). + +#### CLI Expansion + +The HarperDB CLI now supports an expansive set of commands that execute operations from the operations API. For example, you can list users from the command line: + +```bash +harperdb list_users +``` + +#### BigInt Support + +HarperDB now supports `BigInt` attributes/values with integers (with full precision) up to 1000 bits (or 10^301). These can be used as primary keys or standard attributes, and can be used in queries or other operations. Within JSON documents, you can simply use standard JSON integer numbers with up to 300 digits, and large BigInt integers will be returned as standard JSON numbers. + +#### Local Studio Upgrade + +HarperDB has upgraded the local studio to match the same version that is offered on https://studio.harperdb.io. The local studio now has the full robust feature set of the online version. + +### MQTT + +#### mTLS Support + +HarperDB now supports mTLS-based authentication for HTTP, WebSockets, and MQTT. See the [configuration documentation for more information](../../../deployments/configuration). + +#### Single-Level Wildcards + +HarperDB's MQTT service now supports single-level wildcards (`+`), which facilitates a far greater range of subscriptions. + +#### Retain Handling + +HarperDB's MQTT now supports the retain handling flags for subscriptions that are made using MQTT v5. + +#### CRDT + +HarperDB now supports basic conflict-free replicated data type (CRDT) updates that allow properties to be individually updated and merged when separate properties are updated on different threads or nodes. Individual property CRDT updates are automatically performed when you update individual properties through the resource API. Individual property CRDT updates are also used when making `PATCH` requests through the REST API. + +The CRDT functionality also supports explicit incrementation to merge multiple parallel incrementation requests with proper summing. See the [Resource API for more information](../../reference/resources). + +#### Configuration Improvements + +The configuration has improved support for detecting port conflicts, handling paths for fastify routes, and now includes support for specifying a heap limit and TLS ciphers. See the [configuration documentation for more information](../../../deployments/configuration). + +#### Balanced Audit Log Cleanup + +Audit log cleanup has been improved to reduce resource consumption during scheduled cleanups. + +#### `export_*` support for `search_by_conditions` + +The `export_local` and `export_to_s3` operations now support `search_by_conditions` as one of the allowed search operators. + +### Storage Performance Improvements + +Significant improvements were made to handling of free-space to decrease free-space fragmentation and improve performance of reusing free-space for new data. This includes prioritizing reuse of recently released free-space for better memory/caching utilization. + +#### Compact Database + +In addition to storage improvements, HarperDB now includes functionality for [compacting a database](../../../deployments/harper-cli) (while offline), which can be used to eliminate all free-space to reset any fragmentation. + +#### Compression + +Compression is now enabled by default for all records over 4KB.
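As a minimal sketch, tuning this might look like the following in `harperdb-config.yaml` (assuming a boolean `storage.compression` key; check the configuration reference linked below for the exact name and options):

```yaml
storage:
  # compression of records (enabled by default in 4.3 for records over 4KB)
  compression: true
```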
To learn more about how to configure compression, visit [configuration](https://docs.harperdb.io/docs/v/4.3/deployments/configuration). diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.1.md new file mode 100644 index 00000000..870968bd --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.1.md @@ -0,0 +1,12 @@ +--- +title: 4.3.1 +sidebar_position: 59698 +--- + +### HarperDB 4.3.1 + +3/25/2024 + +- Fix Fastify warning about responseTime usage +- Add access to the MQTT topic in the context +- Fix for ensuring local NATS streams are created diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.10.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.10.md new file mode 100644 index 00000000..7badf0cc --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.10.md @@ -0,0 +1,13 @@ +--- +title: 4.3.10 +sidebar_position: 59689 +--- + +### HarperDB 4.3.10 + +5/5/2024 + +- Provide a `data` property on the request/context with deserialized data from the request body for any request, including methods that don't typically have a request body +- Ensure that CRDTs are not double-applied after committing a transaction +- Delete the MQTT will after publishing, even if it fails to publish +- Improve transaction retry logic to use async non-optimistic transactions after multiple retries diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.11.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.11.md new file mode 100644 index 00000000..82b47381 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.11.md @@ -0,0 +1,11 @@ +--- +title: 4.3.11 +sidebar_position: 59688 +--- + +### HarperDB 4.3.11 + +5/15/2024 + +- Add support for multiple certificates with SNI-based selection of certificates for HTTPS/TLS +- Fix warning in Node v22 diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.12.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.12.md new file mode 100644 index 00000000..3f016e25 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.12.md @@ -0,0 +1,11 @@ +--- +title: 4.3.12 +sidebar_position: 59687 +--- + +### HarperDB 4.3.12 + +5/16/2024 + +- Fix for handling ciphers in multiple certificates +- Allow each certificate config to have multiple hostnames diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.13.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.13.md new file mode 100644 index 00000000..e7833e0a --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.13.md @@ -0,0 +1,12 @@ +--- +title: 4.3.13 +sidebar_position: 59686 +--- + +### HarperDB 4.3.13 + +5/22/2024 + +- Fix for handling HTTPS/TLS with IP address targets (no hostname) where SNI is not available +- Fix for memory leak when a node is down and consumers are trying to reconnect +- Faster cross-thread notification mechanism for transaction events diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.14.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.14.md new file mode
100644 index 00000000..0bf4e9c8 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.14.md @@ -0,0 +1,10 @@ +--- +title: 4.3.14 +sidebar_position: 59685 +--- + +### HarperDB 4.3.14 + +5/24/2024 + +- Fix application of ciphers to multi-certificate TLS configuration diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.15.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.15.md new file mode 100644 index 00000000..48321fb6 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.15.md @@ -0,0 +1,11 @@ +--- +title: 4.3.15 +sidebar_position: 59684 +--- + +### HarperDB 4.3.15 + +5/29/2024 + +- Add support for wildcards in hostnames for SNI +- Properly apply ciphers settings on multiple TLS configurations diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.16.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.16.md new file mode 100644 index 00000000..195e27b7 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.16.md @@ -0,0 +1,11 @@ +--- +title: 4.3.16 +sidebar_position: 59683 +--- + +### HarperDB 4.3.16 + +6/3/2024 + +- Properly shim legacy TLS configuration with new multi-certificate support +- Show the changed filenames when an application is reloaded diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.17.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.17.md new file mode 100644 index 00000000..27a0f4cb --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.17.md @@ -0,0 +1,15 @@ +--- +title: 4.3.17 +sidebar_position: 59682 +--- + +### HarperDB 4.3.17 + +6/13/2024 + +- Add MQTT analytics of incoming messages, separated by QoS level +- Ensure that any installed `harperdb` package in components is relinked to the running harperdb. +- Upgrade storage to more efficiently avoid storage increases +- Fix to improve database metrics in system_information +- Fix for pathing on Windows with extension modules +- Add ability to define a range of listening threads diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.18.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.18.md new file mode 100644 index 00000000..052b3821 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.18.md @@ -0,0 +1,10 @@ +--- +title: 4.3.18 +sidebar_position: 59681 +--- + +### HarperDB 4.3.18 + +6/18/2024 + +- Immediately terminate an MQTT connection when there is a keep-alive timeout. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.19.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.19.md new file mode 100644 index 00000000..2676c9f6 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.19.md @@ -0,0 +1,12 @@ +--- +title: 4.3.19 +sidebar_position: 59680 +--- + +### HarperDB 4.3.19 + +7/2/2024 + +- Properly return records for the existing value for subscriptions used for retained messages, so they are correctly serialized. +- Ensure that component deploys empty the target directory for a clean installation and expansion of a `package` sub-directory.
+- Ensure that we do not double-load components that are referenced by symlink from node_modules and in the components directory. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.2.md new file mode 100644 index 00000000..ca273c5e --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.2.md @@ -0,0 +1,16 @@ +--- +title: 4.3.2 +sidebar_position: 59697 +--- + +### HarperDB 4.3.2 + +3/29/2024 + +- Clone node updates to individually clone missing parts +- Fixes for publishing OpenShift container +- Increase purge stream timeout +- Fixed declaration of analytics schema so queries work before a restart +- Fix for iterating queries when deleted records exist +- LMDB stability upgrade +- Fix for cleanup of last will in MQTT diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.20.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.20.md new file mode 100644 index 00000000..d090990b --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.20.md @@ -0,0 +1,18 @@ +--- +title: 4.3.20 +sidebar_position: 59679 +--- + +### HarperDB 4.3.20 + +7/11/2024 + +- The restart_service operation is now executed as a job, making it possible to track the progress of a restart (which is performed as a rolling restart of threads) +- Disable Nagle's algorithm for TCP connections to improve performance +- Append Server-Timing header if a fastify route has already added one +- Avoid symlinking the harperdb directory to itself +- Fix for deleting an empty database +- Upgrade ws and pm2 packages for security vulnerabilities +- Improved TypeScript definitions for Resource and Context. +- The context of a source can set `noCacheStore` to avoid caching the results of a retrieval from the source +- Better error reporting of MQTT parsing errors and termination of connections for compliance diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.21.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.21.md new file mode 100644 index 00000000..7afefd12 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.21.md @@ -0,0 +1,14 @@ +--- +title: 4.3.21 +sidebar_position: 59678 +--- + +### HarperDB 4.3.21 + +8/21/2024 + +- Fixed an issue with iterating/serializing query results with a `limit`. +- Fixed an issue that was preventing the caching of structured records in memory. +- Fixed and added several TypeScript exported types including `tables`, `databases`, `Query`, and `Context` (see the sketch after this list). +- Fixed logging warnings about license limits after a license is updated. +- Don't register a certificate as the default certificate for non-SNI connections unless it lists an IP address in the SAN field.
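A brief sketch of how those exported types might be used in a component (the `Dog` table and the exact query shape here are illustrative assumptions, not part of this release note):

```typescript
// Hypothetical component code; assumes a Dog table defined in this project's schema
import { tables, type Query } from 'harperdb';

const { Dog } = tables;

// Query shape shown here is an assumption based on the Resource API's search()
const query: Query = {
	conditions: [{ attribute: 'name', comparator: 'equals', value: 'Harper' }],
};

// search() results can be iterated asynchronously
for await (const record of Dog.search(query)) {
	console.log(record.id);
}
```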
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.22.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.22.md new file mode 100644 index 00000000..a4bc2003 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.22.md @@ -0,0 +1,15 @@ +--- +title: 4.3.22 +sidebar_position: 59677 +--- + +### HarperDB 4.3.22 + +9/6/2024 + +- Added improved back-pressure handling for large subscriptions and backlogs with durable MQTT sessions +- Allow .extension in URL paths to indicate both preferred encoding and decoding +- Added support for multi-part ids in query parameters +- Limit describe calls by time before using statistical sampling +- Proper cleanup of a transaction when it is aborted due to running out of available read transactions +- Updates to release/builds diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.23.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.23.md new file mode 100644 index 00000000..7496c1d1 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.23.md @@ -0,0 +1,12 @@ +--- +title: 4.3.23 +sidebar_position: 59676 +--- + +### HarperDB 4.3.23 + +9/12/2024 + +- Avoid long-running read transactions on subscription catch-ups +- Reverted change to setting default certificate for IP address only +- Better handling of last-will messages on startup diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.24.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.24.md new file mode 100644 index 00000000..435c15ec --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.24.md @@ -0,0 +1,10 @@ +--- +title: 4.3.24 +sidebar_position: 59675 +--- + +### HarperDB 4.3.24 + +9/12/2024 + +- Fix for querying for large strings (over 255 characters) diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.25.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.25.md new file mode 100644 index 00000000..601d9ec0 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.25.md @@ -0,0 +1,13 @@ +--- +title: 4.3.25 +sidebar_position: 59674 +--- + +### HarperDB 4.3.25 + +9/24/2024 + +- Add analytics for replication latency +- Fix iteration issue over asynchronous joined queries +- Local studio fix for loading applications in insecure context (HTTP) +- Local studio fix for loading configuration tab diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.26.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.26.md new file mode 100644 index 00000000..c0dacf54 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.26.md @@ -0,0 +1,11 @@ +--- +title: 4.3.26 +sidebar_position: 59673 +--- + +### HarperDB 4.3.26 + +9/27/2024 + +- Fixed a security issue that allowed users to bypass access controls with the operations API +- Previously expiration handling was limited to tables with a source, but now it can be applied to any table diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.27.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.27.md new file mode 100644 index 00000000..0bbd448a --- /dev/null +++
b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.27.md @@ -0,0 +1,14 @@ +--- +title: 4.3.27 +sidebar_position: 59672 +--- + +### HarperDB 4.3.27 + +10/2/2024 + +- Fixed handling of HTTP upgrade with a Connection header that does not use Upgrade as the sole value (for Firefox) +- Added metrics for requests by status code +- Properly remove attributes from the stored metadata when removed from GraphQL schema +- Fixed a regression in clustering retrieval of schema description +- Fix attribute validation/handling to ensure that sequential ids can be assigned with insert/upsert operations diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.28.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.28.md new file mode 100644 index 00000000..361d416d --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.28.md @@ -0,0 +1,12 @@ +--- +title: 4.3.28 +sidebar_position: 59671 +--- + +### HarperDB 4.3.28 + +10/3/2024 + +- Tolerate user with no role when building NATS config +- Change metrics for requests by status code to be prefixed with "response\_" +- Log error `cause`, and other properties, when available. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.29.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.29.md new file mode 100644 index 00000000..5537df8b --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.29.md @@ -0,0 +1,17 @@ +--- +title: 4.3.29 +sidebar_position: 59670 +--- + +### HarperDB 4.3.29 + +10/7/2024 + +- Avoid unnecessary cookie session creation without explicit login +- Added support for caching directives in operations API +- Fixed issue with creating metadata for table with no primary key +- Local studio upgrade: + - Added support for "cache only" mode to view table data without origin resolution + - Added partial support for cookie-based authentication + - Added support for browsing tables with no primary key + - Improved performance for sorting tables diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.3.md new file mode 100644 index 00000000..38175dda --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.3.md @@ -0,0 +1,10 @@ +--- +title: 4.3.3 +sidebar_position: 59696 +--- + +### HarperDB 4.3.3 + +4/01/2024 + +- Improve MQTT logging by properly logging auth failures and disconnections diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.30.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.30.md new file mode 100644 index 00000000..e005db97 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.30.md @@ -0,0 +1,10 @@ +--- +title: 4.3.30 +sidebar_position: 59669 +--- + +### HarperDB 4.3.30 + +10/9/2024 + +- Properly assign transaction timestamp to writes from cache resolutions (ensuring that latencies can be calculated on replicating nodes) diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.31.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.31.md new file mode 100644 index 00000000..80cab2b9 --- /dev/null +++
b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.31.md @@ -0,0 +1,12 @@ +--- +title: 4.3.31 +sidebar_position: 59668 +--- + +### HarperDB 4.3.31 + +10/10/2024 + +- Reset the restart limit for manual restarts to ensure that the NATS process will continue to restart after more than 10 manual restarts +- Only apply caching directives (from headers) to tables/resources that are configured to be caching, sourced from another resource +- Catch/tolerate errors on serializing objects for logging diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.32.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.32.md new file mode 100644 index 00000000..0b5893b4 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.32.md @@ -0,0 +1,12 @@ +--- +title: 4.3.32 +sidebar_position: 59667 +--- + +### HarperDB 4.3.32 + +10/16/2024 + +- Fix a memory leak when cluster_network closes a hub connection +- Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic +- Record analytics and server-timing header even when cache resolution fails diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.33.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.33.md new file mode 100644 index 00000000..7707a562 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.33.md @@ -0,0 +1,10 @@ +--- +title: 4.3.33 +sidebar_position: 59666 +--- + +### HarperDB 4.3.33 + +10/24/2024 + +- Change the default maximum length for a fastify route parameter from 100 to 1000 characters.
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.34.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.34.md new file mode 100644 index 00000000..2bd65833 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.34.md @@ -0,0 +1,10 @@ +--- +title: 4.3.34 +sidebar_position: 59665 +--- + +### HarperDB 4.3.34 + +10/24/2024 + +- lmdb-js upgrade diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.35.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.35.md new file mode 100644 index 00000000..f8dd7b73 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.35.md @@ -0,0 +1,11 @@ +--- +title: 4.3.35 +sidebar_position: 59664 +--- + +### HarperDB 4.3.35 + +11/12/2024 + +- Upgrades for supporting Node.js v23 +- Fix for handling a change in the schema for nested data structures diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.36.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.36.md new file mode 100644 index 00000000..2eb8e636 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.36.md @@ -0,0 +1,10 @@ +--- +title: 4.3.36 +sidebar_position: 59663 +--- + +### HarperDB 4.3.36 + +11/14/2024 + +- lmdb-js upgrade for better free-space management diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.37.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.37.md new file mode 100644 index 00000000..f36e1c32 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.37.md @@ -0,0 +1,10 @@ +--- +title: 4.3.37 +sidebar_position: 59662 +--- + +### HarperDB 4.3.37 + +12/6/2024 + +- lmdb-js upgrade for preventing crashes with shared user buffers diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.38.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.38.md new file mode 100644 index 00000000..d1fce0f8 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.38.md @@ -0,0 +1,10 @@ +--- +title: 4.3.38 +sidebar_position: 59661 +--- + +### HarperDB 4.3.38 + +1/10/2025 + +- Fixes for audit log cleanup diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.4.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.4.md new file mode 100644 index 00000000..0c96732f --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.4.md @@ -0,0 +1,11 @@ +--- +title: 4.3.4 +sidebar_position: 59695 +--- + +### HarperDB 4.3.4 + +4/9/2024 + +- Fixed a buffer overrun issue with decompressing compressed data +- Better keep-alive of transactions with long-running queries diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.5.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.5.md new file mode 100644 index 00000000..60888785 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.5.md @@ -0,0 +1,10 @@ +--- +title: 4.3.5 +sidebar_position: 59694 +--- + +### HarperDB 4.3.5 + +4/10/2024 + +- Fixed a buffer overrun issue with decompressing compressed data diff --git
a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.6.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.6.md new file mode 100644 index 00000000..54a4739a --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.6.md @@ -0,0 +1,14 @@ +--- +title: 4.3.6 +sidebar_position: 59693 +--- + +### HarperDB 4.3.6 + +4/12/2024 + +- Fixed parsing of dates from epoch millisecond times in queries +- Fixed CRDT incrementation of different data types +- Adjustments to text/plain content type q-value handling +- Fixed parsing of passwords with a colon +- Added MQTT events for connections, authorization, and disconnections diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.7.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.7.md new file mode 100644 index 00000000..df9fb331 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.7.md @@ -0,0 +1,14 @@ +--- +title: 4.3.7 +sidebar_position: 59692 +--- + +### HarperDB 4.3.7 + +4/16/2024 + +- Fixed transaction handling to stay open during long compaction operations +- Fixed handling of sorting on non-indexed attributes +- Storage stability improvements +- Fixed authentication/authorization of WebSockets connection and use of cookies +- Fixes for clone node operations diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.8.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.8.md new file mode 100644 index 00000000..0e4c5b6c --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.8.md @@ -0,0 +1,14 @@ +--- +title: 4.3.8 +sidebar_position: 59691 +--- + +### HarperDB 4.3.8 + +4/26/2024 + +- Added support for the MQTT keep-alive feature (disconnecting if no control messages are received within keep-alive window) +- Improved handling of write queue timeouts, with configurability +- Fixed a memory leak that can occur with NATS reconnections after heartbeat misses +- Fixed a bug in clone node with a null port +- Add error events to MQTT events system diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.9.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.9.md new file mode 100644 index 00000000..17c95934 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.3.9.md @@ -0,0 +1,10 @@ +--- +title: 4.3.9 +sidebar_position: 59690 +--- + +### HarperDB 4.3.9 + +4/30/2024 + +- lmdb-js upgrade diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.0.md new file mode 100644 index 00000000..f193b95a --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.0.md @@ -0,0 +1,60 @@ +--- +title: 4.4.0 +sidebar_position: 59599 +--- + +# 4.4.0 + +#### HarperDB 4.4.0 + +10/14/2024 + +### Native Replication + +HarperDB has a completely [new native replication system](../../../developers/replication/) which is faster, more efficient, secure, and reliable than the previous replication system. The new system (codenamed "Plexus") uses direct WebSocket connections between servers with highly optimized encoding and is driven by directly tracking the audit/transaction log for efficient and flexible data transfer.
This replication has improved resilience, with the ability to reach consensus consistency through cross-node catch-up when a node goes down. Network connections can be performed over the existing operations API port or a separate port, for improved configurability. + +The native replication system is much easier to configure, with multiple options for authentication and security, including PKI/mTLS security that is highly robust and easy to use in conjunction with existing PKI certificates. Replication can be configured through explicit subscriptions or for automated replication of all data in a database. With automated replication, gossiping is used to automatically discover and connect to other nodes in the cluster. + +#### Sharding + +The new replication system also includes provisional support for [sharding](../../../developers/replication/sharding). This sharding mechanism paves the way for greater scalability and performance by allowing data to be distributed across multiple nodes. + +#### Replicated Operations + +Certain operations can now be replicated across the cluster, including the deployment and management of components. This allows for a more seamless experience when managing a cluster of HarperDB instances. Restarts can also be "replicated", and if used, will perform a rolling restart of all the nodes in a cluster. + +### Computed Properties + +Computed properties allow applications to define properties that are computed from other data stored in records, without requiring actual storage of the computed value. For example, you could have a computed property for a full name based on first and last, or age/duration based on a date. Computed properties are also foundational for custom indexes. See the [schema documentation](../../../../developers/applications/defining-schemas), [Resource API](../../reference/resources), and our blog post on [computed properties](https://www.harperdb.io/development/tutorials/how-to-create-custom-indexes-with-computed-properties) for more information. + +### Custom Indexing + +Custom indexes can now be defined using computed properties to allow for unlimited possibilities of indexing, including composite, full-text, and vector indexing. Again, see the [schema documentation](../../../../developers/applications/defining-schemas) for more information. + +### Native Graph Support + +HarperDB now includes provisional support for native [GraphQL querying functionality](../../reference/graphql). This allows for querying of graph data using GraphQL syntax. This support is provisional, and some APIs may be updated in the future. + +### Dynamic Certificate Management + +Certificates are now stored in system tables and can be dynamically managed. Certificates can be added, replaced, and deleted without restarting HarperDB. This includes both standard certificates and certificate authorities, as well as private keys (private keys are not stored in a table; they are securely stored in a file). + +#### Status Report on Startup + +On startup, HarperDB will now print out an informative status of all running services and the ports they are listening on. + +#### Support for Response object + +Resource methods can now return a `Response` object (or an object with `headers` and `status`) to allow for more control over the response. + +### Auto-incrementing Primary Keys + +Primary keys can now be auto-incrementing, allowing for automatic generation of numeric primary keys on insert/creation.
Primary keys defined with `ID` or `String` will continue to use GUIDs for auto-assigned primary keys, which occurs on insert or creation if the primary key is not provided. However, for keys that are defined as `Any`, `Int`, or `Long`, the primary key will be assigned using auto-incrementation. This is significantly more efficient than GUIDs since the key only requires 8 bytes of storage instead of 31 bytes, and doesn't require random number generation. + +#### Developer/Production Mode for Configuration + +When using interactive installation (when configuration is not provided through arguments or env vars), HarperDB now provides an option for developer or production mode, with a set of configuration defaults better suited to each environment. + +#### Export by Protocol + +Exported resources can be configured to be specifically exported by protocol (REST, MQTT, etc.) for more granular control over what is exported where. diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.1.md new file mode 100644 index 00000000..5c1e2037 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.1.md @@ -0,0 +1,13 @@ +--- +title: 4.4.1 +sidebar_position: 59598 +--- + +### HarperDB 4.4.1 + +10/17/2024 + +- Fix issue where non-RSA keys were not being parsed correctly on startup. +- Fix a memory leak when cluster_network closes a hub connection +- Improved MQTT error handling, with less verbose logging of more common errors, and treating a missing subscription as an invalid/missing topic +- Record analytics and server-timing header even when cache resolution fails diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.10.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.10.md new file mode 100644 index 00000000..6d8aad2c --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.10.md @@ -0,0 +1,10 @@ +--- +title: 4.4.10 +sidebar_position: 59589 +--- + +### HarperDB 4.4.10 + +12/17/2024 + +- Fix for deploying packages and detecting node_modules directory diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.11.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.11.md new file mode 100644 index 00000000..5e5b5fc0 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.11.md @@ -0,0 +1,11 @@ +--- +title: 4.4.11 +sidebar_position: 59588 +--- + +### HarperDB 4.4.11 + +12/18/2024 + +- Fix for initial certification creation on upgrade +- Docker build fix diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.12.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.12.md new file mode 100644 index 00000000..8efe840e --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.12.md @@ -0,0 +1,11 @@ +--- +title: 4.4.12 +sidebar_position: 59587 +--- + +### HarperDB 4.4.12 + +12/19/2024 + +- Move components installed by reference into hdb/components for consistency and compatibility with Next.js +- Use npm install --force to ensure modules are installed diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.13.md
b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.13.md new file mode 100644 index 00000000..cab28cc0 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.13.md @@ -0,0 +1,16 @@ +--- +title: 4.4.13 +sidebar_position: 59586 +--- + +### HarperDB 4.4.13 + +1/2/2025 + +- Fix for not using requestCert if the port doesn't need replication +- Fix for applying timeouts to the HTTP server for older Node versions +- Updates for different replication configuration settings, including sharding and replication using stored credentials +- Mitigation for crashes due to GC'ed shared array buffers +- Fix for error handling with CLI failures +- Updated dependencies +- Fix to allow securePort to be set on authentication diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.14.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.14.md new file mode 100644 index 00000000..b44a173d --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.14.md @@ -0,0 +1,13 @@ +--- +title: 4.4.14 +sidebar_position: 59585 +--- + +### HarperDB 4.4.14 + +1/3/2025 + +- Fix for starting HTTP server if headersTimeout is omitted in the configuration +- Fix for avoiding ping timeouts for large/long-duration WS messages between nodes +- Don't report errors for a component that only uses a directory +- Add flag for disabling WebSocket on REST component diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.15.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.15.md new file mode 100644 index 00000000..b6a8ee2b --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.15.md @@ -0,0 +1,12 @@ +--- +title: 4.4.15 +sidebar_position: 59584 +--- + +### HarperDB 4.4.15 + +1/8/2025 + +- Fix for managing the state of replication sequences for a node +- Fix for better concurrency with ongoing replication +- Fix for accessing audit log entries diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.16.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.16.md new file mode 100644 index 00000000..d85de974 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.16.md @@ -0,0 +1,16 @@ +--- +title: 4.4.16 +sidebar_position: 59583 +--- + +### HarperDB 4.4.16 + +1/22/2025 + +- Fix for cleaning up old audit entries and associated deletion entries +- Allow CLI operations to be run when cloning is enabled +- Report table size in describe operations +- Fix for cleaning up symlinks when dropping components +- Fix for enumerating components when symlinks are used +- Add an option for using a specific installation command with deploys +- Add an API for registering an HTTP upgrade listener with `server.upgrade` diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.17.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.17.md new file mode 100644 index 00000000..239f7729 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.17.md @@ -0,0 +1,13 @@ +--- +title: 4.4.17 +sidebar_position: 59582 +--- + +### HarperDB 4.4.17 + +1/29/2025 + +- Provide statistics on the size of the audit log store +- Fix handling of symlinks to the HarperDB package to avoid NPM errors in restricted
containers +- Add option for rolling/consecutive restarts for deployments +- Fix for enabling root CAs for replication authorization diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.18.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.18.md new file mode 100644 index 00000000..e7354587 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.18.md @@ -0,0 +1,12 @@ +--- +title: 4.4.18 +sidebar_position: 59581 +--- + +### HarperDB 4.4.18 + +1/29/2025 + +- Add option for disabling full table copy in replication +- Add option for startTime in route configuration +- Add/fix option to deploy with package from CLI diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.19.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.19.md new file mode 100644 index 00000000..5a1cc14e --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.19.md @@ -0,0 +1,13 @@ +--- +title: 4.4.19 +sidebar_position: 59580 +--- + +### HarperDB 4.4.19 + +2/4/2025 + +- LMDB upgrade for free-list verification on commit +- Add check to avoid compacting database multiple times with compactOnStart +- Fix handling of denied/absent subscription +- Add support for including symlinked directories in packaging a deployed component diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.2.md new file mode 100644 index 00000000..53dfbb7b --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.2.md @@ -0,0 +1,10 @@ +--- +title: 4.4.2 +sidebar_position: 59597 +--- + +### HarperDB 4.4.2 + +10/18/2024 + +- Republish of 4.4.1 with Git merge correction. 
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.20.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.20.md new file mode 100644 index 00000000..656de065 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.20.md @@ -0,0 +1,10 @@ +--- +title: 4.4.20 +sidebar_position: 59579 +--- + +### HarperDB 4.4.20 + +2/11/2025 + +- LMDB upgrade for improved handling of page boundaries with free-space lists diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.21.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.21.md new file mode 100644 index 00000000..c63d84a2 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.21.md @@ -0,0 +1,12 @@ +--- +title: 4.4.21 +sidebar_position: 59578 +--- + +### HarperDB 4.4.21 + +2/25/2025 + +- Fix for saving audit log entries for large keys (> 1KB) +- Security fix for handling missing passwords +- Skip bin links for NPM installation to avoid access issues diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.22.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.22.md new file mode 100644 index 00000000..d66163f9 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.22.md @@ -0,0 +1,10 @@ +--- +title: 4.4.22 +sidebar_position: 59577 +--- + +### HarperDB 4.4.22 + +3/5/2025 + +- Add new http configuration option `corsAccessControlAllowHeaders` diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.23.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.23.md new file mode 100644 index 00000000..9048b3d6 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.23.md @@ -0,0 +1,11 @@ +--- +title: 4.4.23 +sidebar_position: 59576 +--- + +### HarperDB 4.4.23 + +3/7/2025 + +- Fix for subscriptions to children of segmented id +- Fix for better error reporting on NPM failures diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.24.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.24.md new file mode 100644 index 00000000..324a2423 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.24.md @@ -0,0 +1,11 @@ +--- +title: 4.4.24 +sidebar_position: 59575 +--- + +### HarperDB 4.4.24 + +3/10/2025 + +- Use process.exit(0) to restart when enabled by env var +- Reset the cwd on thread restart diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.3.md new file mode 100644 index 00000000..4e844820 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.3.md @@ -0,0 +1,14 @@ +--- +title: 4.4.3 +sidebar_position: 59596 +--- + +### HarperDB 4.4.3 + +10/25/2024 + +- Fix for notification of records through classes that override get for multi-tier caching +- Fix for CLI operations +- Support for longer route parameters in Fastify routes +- Fix for accessing `harperdb` package/module from user threads +- Improvements to clone node for cloning without credentials diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.4.md 
b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.4.md new file mode 100644 index 00000000..bbf0df8d --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.4.md @@ -0,0 +1,12 @@ +--- +title: 4.4.4 +sidebar_position: 59595 +--- + +### HarperDB 4.4.4 + +11/4/2024 + +- Re-introduce declarative roles and permissions +- Fix for OpenAPI endpoint +- Fix for exports of `harperdb` package/module diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.5.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.5.md new file mode 100644 index 00000000..448687c6 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.5.md @@ -0,0 +1,16 @@ +--- +title: 4.4.5 +sidebar_position: 59594 +--- + +### HarperDB 4.4.5 + +11/15/2024 + +- Fix for DOS vulnerability in large headers with cache-control and replication headers +- Fix for handling a change in the schema type for sub-fields in a nested object +- Add support for content type handlers to return iterators +- Fix for session management with custom authentication handler +- Updates for Node.js V23 compatibility +- Fix for sorting on nested properties +- Fix for querying on not_equal to a null with object values diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.6.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.6.md new file mode 100644 index 00000000..4cc0cc86 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.6.md @@ -0,0 +1,13 @@ +--- +title: 4.4.6 +sidebar_position: 59593 +--- + +### HarperDB 4.4.6 + +11/25/2024 + +- Fix queries with only sorting applied +- Fix for handling invalidation events propagating through sources +- Expanded CLI support for deploying packages +- Support for deploying large packages diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.7.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.7.md new file mode 100644 index 00000000..a4f6041f --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.7.md @@ -0,0 +1,11 @@ +--- +title: 4.4.7 +sidebar_position: 59592 +--- + +### HarperDB 4.4.7 + +11/27/2024 + +- Allow for package to deploy own modules +- Fix for preventing double sourcing of resources diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.8.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.8.md new file mode 100644 index 00000000..493736a8 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.8.md @@ -0,0 +1,10 @@ +--- +title: 4.4.8 +sidebar_position: 59591 +--- + +### HarperDB 4.4.8 + +12/2/2024 + +- Add multiple node versions of published docker containers diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.9.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.9.md new file mode 100644 index 00000000..077e80cd --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.4.9.md @@ -0,0 +1,14 @@ +--- +title: 4.4.9 +sidebar_position: 59590 +--- + +### HarperDB 4.4.9 + +12/12/2024 + +- Change enableRootCAs to default to true +- Fixes for install and clone commands +- Add rejectUnauthorized to the CLI options +- 
Fixes for cloning +- Install modules in their own component when deploying a package by payload diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.0.md new file mode 100644 index 00000000..2f8203fa --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.0.md @@ -0,0 +1,99 @@ +--- +title: 4.5.0 +sidebar_position: 59499 +--- + +# 4.5.0 + +#### HarperDB 4.5.0 + +3/13/2025 + +### Blob Storage + +4.5 introduces a new [Blob storage system](../../reference/blob) that is designed to efficiently handle large binary objects, with built-in support for streaming large content/media in and out of storage. This provides significantly better performance and functionality for large unstructured data, such as HTML, images, video, and other large files. Components can leverage this functionality through the JavaScript `Blob` interface and the new `createBlob` function. Blobs are fully replicated and integrated. Harper can also coerce strings to `Blob`s (when dictated by the field type), making it feasible to use blobs for large string data, including with MQTT messaging. A minimal usage sketch appears below. + +### Password Hashing Upgrade + +4.5 adds two new password hashing algorithms for better security (to replace md5): +`sha256`: This is a solid general-purpose password hashing algorithm, with good security properties and excellent performance. This is the default algorithm in 4.5. +`argon2id`: This provides the highest level of security and is the recommended algorithm for environments that do not require frequent password verifications. However, it is more CPU intensive, and may not be suitable for environments with a high frequency of password verifications. + +### Resource and Storage Analytics + +4.5 includes numerous new analytics for resources and storage, including page faults, context switches, free space, disk usage, and other metrics. + +#### Default Replication Port + +The default port for replication has been changed from 9925 to 9933. + +### Property Forwarding + +Record properties on resource instances are now accessible through standard property access syntax, regardless of whether the property was declared in a schema. Previously, only properties declared in a schema were accessible through standard property access syntax. This change allows for more consistent and intuitive access to record properties, regardless of how they were defined. It is still recommended to declare properties in a schema for better performance and documentation. + +### Storage Reclamation + +Harper now includes functionality for automatically cleaning up and evicting non-essential data when storage is running low. When free space drops below 40% (configurable), Harper will start to: + +- Evict older entries from caching tables +- Evict older audit log entries +- Remove older rotated log files + These efforts will become progressively more aggressive as free space decreases. + +### Expanded Sharding Functionality + +When sharding is being used, Harper can now honor write requests with residency information that will not be written to the local node's table. Harper also now allows nodes to be declaratively configured as part of a shard. + +### Certificate Revocation + +Certificates can now be revoked by configuring nodes with a list of revoked certificate serial numbers.
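To make the blob workflow described above concrete, here is a minimal sketch of writing and streaming a blob from a component. The `Asset` table and its `media` field are hypothetical; `createBlob` and `tables` are the exports named in these notes, though the exact call signatures may differ.

```javascript
// Minimal sketch: store a large file as a blob and stream it back out.
// `Asset` is a hypothetical table with a `media` field defined in the
// component's schema; createBlob is the new 4.5 function named above.
import { createBlob, tables } from 'harperdb';
import { readFile } from 'node:fs/promises';

const { Asset } = tables;

export async function storeMedia(id, path) {
	// createBlob turns binary data into a Blob that can be stored
	// directly on a record field and replicated along with the record
	const media = await createBlob(await readFile(path));
	await Asset.put({ id, media });
}

export async function getMediaStream(id) {
	const asset = await Asset.get(id);
	// standard JavaScript Blob interface: stream the content out of
	// storage rather than buffering it all in memory
	return asset.media.stream();
}
```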
+ +### Built-in `loadEnv` Component + +There is a new `loadEnv` component loader that can be used to load environment variables from a .env file in a component. + +### Cluster Status Information + +The [`cluster_status` operation](../../../developers/operations-api/clustering) now includes new statistics for replication, including the timestamps of last received transactions, sent transactions, and committed transactions. + +### Improved URL path parsing + +Resources can be defined with nested paths and directly accessed by the exact path without requiring a trailing slash. The `id.property` syntax for accessing properties in URLs will only be applied to properties that are declared in a schema. This allows for URLs to generally include dots in paths without being interpreted as property access. A new [`directURLMapping` option/flag](../../../deployments/configuration) on resources allows for more direct URL path handling as well. + +### `server.authenticateUser` API + +In addition to the `server.getUser` API that allows for retrieval of users by username, the `server.authenticateUser` API is now available, which will _always_ verify the user with the provided password (a hypothetical usage sketch appears at the end of these notes). + +#### Improved Message Delivery + +Message delivery performance has been improved. + +### HTTP/2 + +HarperDB now supports HTTP/2 for all API endpoints. This can be enabled with the `http2` option in the configuration file. + +### `harperdb` symlink + +Using `import from 'harperdb'` will more consistently work when directly running a component locally. + +### Transaction Reuse + +By default, transactions can now be reused after calling `transaction.commit()`. + +### GraphQL configuration + +The GraphQL query endpoint can be configured to listen on different ports. The GraphQL query endpoint is now also disabled by default, to avoid any conflicts. + +### Glob support for components + +Glob file handling for specifying files used by components has been improved for better consistency. + +### Table.getRecordCount + +`Table.getRecordCount()` is now available to get the number of records in a table. + +### Removal of record counts from REST API + +Previously, the root path for a resource in the REST API would return a record count. However, this was a significant performance hazard and was never documented, so it has been removed to ensure better performance and reliability. + +Note that downgrading from 4.5 to 4.4 is _not_ supported.
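As referenced in the `server.authenticateUser` section above, here is a hypothetical sketch of a login endpoint built on that API. It assumes `authenticateUser(username, password)` resolves with the user on success and rejects otherwise; the actual signature may differ.

```javascript
// Hypothetical login endpoint using server.authenticateUser; the
// (username, password) signature is an assumption, not confirmed API.
import { server, Resource } from 'harperdb';

export class Login extends Resource {
	async post(data) {
		// unlike server.getUser, this always verifies the supplied
		// password before returning the user record
		const user = await server.authenticateUser(data.username, data.password);
		return { authenticated: true, username: user.username };
	}
}
```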
diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.1.md new file mode 100644 index 00000000..ec431a8a --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.1.md @@ -0,0 +1,16 @@ +--- +title: 4.5.1 +sidebar_position: 59498 +--- + +### HarperDB 4.5.1 + +3/18/2025 + +- Fix/implementation for sharding data that is written for cache resolution +- Add support for replication.shard in configuration for defining local node's shard id +- Fix for source map handling in stack traces +- Improved error reporting for syntax errors in component code +- Improved logging on deployment and NPM installation +- Added shard information to cluster_status +- Fix for audit entry eviction when a table is deleted diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.10.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.10.md new file mode 100644 index 00000000..b74fbadb --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.10.md @@ -0,0 +1,11 @@ +--- +title: 4.5.10 +sidebar_position: 59489 +--- + +### HarperDB 4.5.10 + +5/20/2025 + +- Expose the `resources` map for being able to set and access custom resources +- Fix for cleaning up blob files that are used when a database is deleted diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.11.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.11.md new file mode 100644 index 00000000..cba2d019 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.11.md @@ -0,0 +1,10 @@ +--- +title: 4.5.11 +sidebar_position: 59488 +--- + +### HarperDB 4.5.11 +6/27/2025 + +* Fix bug (workaround Node.js bug) with assigning the ciphers to a server and applying to TLS connections +* Fix for handling TLS array when checking certificates configuration \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.12.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.12.md new file mode 100644 index 00000000..6353bfc2 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.12.md @@ -0,0 +1,13 @@ +--- +title: 4.5.12 +sidebar_position: 59487 +--- + +### HarperDB 4.5.12 +7/9/2025 + +- Fix for dynamically setting `harperdb` package symlink on deploy +- Assign shard numbers from each node's config rather than from routes +- Handle certificates without a common name, falling back to the SANs +- Properly clean up blobs that are only transiently used for replication +- Ensure that we always set up server.shards even when there are no TLS connections diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.13.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.13.md new file mode 100644 index 00000000..2b8a6149 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.13.md @@ -0,0 +1,9 @@ +--- +title: 4.5.13 +sidebar_position: 59486 +--- + +### HarperDB 4.5.13 +7/12/2025 + +- Fix cleaning out audit entries when a blob has been removed diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.14.md 
b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.14.md new file mode 100644 index 00000000..0ad8f235 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.14.md @@ -0,0 +1,9 @@ +--- +title: 4.5.14 +sidebar_position: 59485 +--- + +### HarperDB 4.5.14 +7/15/2025 + +- Use proper back-pressure when copying a table for initial database sync diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.2.md new file mode 100644 index 00000000..62468720 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.2.md @@ -0,0 +1,13 @@ +--- +title: 4.5.2 +sidebar_position: 59497 +--- + +### HarperDB 4.5.2 + +3/25/2025 + +- For defined schemas, don't allow updates from remote nodes that could cause conflicts and repeated schema change requests +- New harper-chrome Docker container for accessing Chrome binaries for use with tools like Puppeteer +- Improved rolling restart handling of errors when reaching individual nodes +- Defined a cleaner operation object to avoid accidental leaking of credentials in logging diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.3.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.3.md new file mode 100644 index 00000000..b0878089 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.3.md @@ -0,0 +1,11 @@ +--- +title: 4.5.3 +sidebar_position: 59496 +--- + +### HarperDB 4.5.3 + +4/3/2025 + +- Fix for immediately reloading updated certificates and private key files to ensure that certificates properly match the private key +- Fix for analytics of storage size when tables are deleted diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.4.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.4.md new file mode 100644 index 00000000..2d334a06 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.4.md @@ -0,0 +1,12 @@ +--- +title: 4.5.4 +sidebar_position: 59495 +--- + +### HarperDB 4.5.4 + +4/11/2025 + +- Fix for replication of (non-retained) published messages +- Make the cookie domain configurable to allow for cookies shared across sub-hostnames +- Fix for on-demand loading of shared blobs diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.5.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.5.md new file mode 100644 index 00000000..606f8063 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.5.md @@ -0,0 +1,11 @@ +--- +title: 4.5.5 +sidebar_position: 59494 +--- + +### HarperDB 4.5.5 + +4/15/2025 + +- Updates for better messaging with symlinks in Windows +- Fix for saving replicated blobs diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.6.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.6.md new file mode 100644 index 00000000..a711a988 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.6.md @@ -0,0 +1,12 @@ +--- +title: 4.5.6 +sidebar_position: 59493 +--- + +### HarperDB 4.5.6 + +4/17/2025 + +- Fix for changing the type of the primary key attribute +- Added a new
`includeExpensiveRecordCountEstimates` property to the REST component for returning record count estimates +- Fix for dropping attributes diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.7.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.7.md new file mode 100644 index 00000000..ce785506 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.7.md @@ -0,0 +1,11 @@ +--- +title: 4.5.7 +sidebar_position: 59492 +--- + +### HarperDB 4.5.7 + +4/23/2025 + +- Fix for handling buffers from replicated sharded blob records to prevent overwriting while in use +- Updated the included studio version with a fix for logging in diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.8.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.8.md new file mode 100644 index 00000000..32f43190 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.8.md @@ -0,0 +1,13 @@ +--- +title: 4.5.8 +sidebar_position: 59491 +--- + +### HarperDB 4.5.8 + +4/30/2025 + +- Fix MQTT subscription topics with trailing slashes to ensure they are not treated as a wildcard +- Fix the arguments that are used for the default connect/subscribe calls so they pass the second argument from connect like `connect(incomingMessages, query) -> subscribe(query)` +- Add support for replication connections using any configured certificate authorities to verify the server certificates +- Added more descriptive error messages on errors in user residency functions diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.9.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.9.md new file mode 100644 index 00000000..9d6d13ef --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.5.9.md @@ -0,0 +1,10 @@ +--- +title: 4.5.9 +sidebar_position: 59490 +--- + +### HarperDB 4.5.9 + +5/14/2025 + +- Remove --no-bin-links directive for NPM that was causing installs of dependencies to fail diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.0.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.0.md new file mode 100644 index 00000000..bdd07ea9 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.0.md @@ -0,0 +1,36 @@ +--- +title: 4.6.0 +sidebar_position: 59399 +--- + +# 4.6.0 + +#### HarperDB 4.6.0 + +6/13/2025 + +### Vector Indexing: Hierarchical Navigable Small World + +Harper 4.6 now includes support for vector indexing, which allows for efficient and fast queries on large semantic data sets. Vector indexing is powered by the [Hierarchical Navigable Small World (HNSW) algorithm](https://arxiv.org/abs/1603.09320); it can be used to index any vector-valued property and is particularly useful for vector text-embedding data. This provides powerful, efficient vector-based searching for semantic and AI-based querying functionality. HNSW is a preferred algorithm for vector indexing and searching because it provides an excellent balance of recall and performance. + +### New Extension API with support for dynamic reloading + +4.6 introduces a new extension API with significant ergonomic improvements for creating new extension components that are more robust and dynamic.
The new API also provides a mechanism for dynamic reloading of some files and configuration without restarts. + +### Logging Improvements + +4.6 includes significant expansions to logging configurability, allowing for specific logging configurations of individual components. This also leverages the new extension API to allow for dynamic reloading of logging configuration. With the more granular logging, logs can be directed to different files and/or different log levels. +The logger includes support for HTTP logging, with configurability for logging standard HTTP methods and paths as well as headers, ids, and timing information. It also supports distinct logging configuration for different components. +The new logger is now based on the Node.js Console API, with improved formatting of log messages for various types of objects. +An important change is that logging to standard out/error will _not_ include the timestamp, and console logging does not get written to the log files by default. + + +### Data Loader +4.6 includes a new [data loader](../../../../developers/applications/data-loader) that can be used to load data into HarperDB as part of a component. The data loader can be used to load data from JSON files and can be deployed and distributed with a component to provide a reliable mechanism for ensuring specific records are loaded into Harper. + +### Resource API Upgrades + +4.6 includes an upgraded form of the Resource API that can be selected, with significant improvements in ease of use. + +### only-if-cached behavior +Previously, when the `only-if-cached` caching directive was used and the entry was not cached, Harper would return a 504 but still make a request to origin in the background. Now, Harper will no longer make a request to origin for `only-if-cached`.
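A minimal client-side sketch of the new behavior, assuming a hypothetical cache table exposed through REST and that 9926 is the configured HTTP port:

```javascript
// Request a cache entry without triggering origin resolution.
// `MyCache` and port 9926 are assumptions for illustration.
const response = await fetch('http://localhost:9926/MyCache/some-id', {
	headers: { 'Cache-Control': 'only-if-cached' },
});

if (response.status === 504) {
	// Not cached: prior to 4.6 Harper would still issue a background
	// request to origin here; as of 4.6 it no longer does.
	console.log('entry not cached, and no origin request was made');
}
```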
\ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.1.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.1.md new file mode 100644 index 00000000..cf8ccd2c --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.1.md @@ -0,0 +1,18 @@ +--- +title: 4.6.1 +sidebar_position: 59398 +--- + +# 4.6.1 +7/10/2025 + +- Plugin API updates to use plugin nomenclature +- Fix for dynamically setting `harperdb` package symlink on deploy +- Assign shard numbers from each node's config rather than from routes +- Handle certificates without a common name, falling back to the SANs +- Properly clean up blobs that are only transiently used for replication +- Ensure that we always set up server.shards even when there are no TLS connections +- Fix for clone node getting the cluster status +- Properly initialize config on CLI operations to avoid path error +- Fix for compiling LMDB for macOS using little-endian +- Allow secure cookies with localhost diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.2.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.2.md new file mode 100644 index 00000000..579f26df --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/4.6.2.md @@ -0,0 +1,11 @@ +--- +title: 4.6.2 +sidebar_position: 59397 +--- + +# 4.6.2 +7/15/2025 + +- Use proper back-pressure when copying a table for initial database sync +- Fix cleaning out audit entries when a blob has been removed +- Fix for running CLI operations when HarperDB is not installed \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/_category_.json b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/_category_.json new file mode 100644 index 00000000..9a7bca50 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "HarperDB Tucker (Version 4)", + "position": -4 +} \ No newline at end of file diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/index.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/index.md new file mode 100644 index 00000000..f8e926f5 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/index.md @@ -0,0 +1,53 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +HarperDB version 4 ([Tucker release](./tucker)) represents a major step forward in database technology. This release line has ground-breaking architectural advancements including: + +## [4.6](./4.6.0) + +- Vector Indexing - 4.6 introduces a new Vector Indexing system based on Hierarchical Navigable Small World Graphs. +- New extension API - 4.6 introduces a new extension API for creating extension components. +- Improved logging configurability - Logging can be dynamically updated and specifically configured for each component. +- Resource API - 4.6 has updated Resource APIs for ease of use. +- Data loader - 4.6 introduces a new data loader that allows for ensuring records exist as part of a component. + +## [4.5](./4.5.0) + +- Blob Storage - 4.5 introduces a new [Blob storage system](../../reference/blob). +- Password Hashing Upgrade - two new password hashing algorithms for better security (to replace md5).
+- New resource and storage analytics + +## [4.4](./4.4.0) + +- Native replication (codename "Plexus") which is faster, more efficient, more secure, and more reliable than the previous replication system and provides provisional sharding capabilities with a foundation for the future +- Computed properties that allow applications to define properties that are computed from other properties, allowing for composite properties that are calculated from other data stored in records without requiring actual storage of the computed value +- Custom indexing including composite, full-text indexing, and vector indexing + +## [4.3](./4.3.0) + +- Relationships, joins, and broad new querying capabilities for complex and nested conditions, sorting, joining, and selecting with significant query optimizations +- More advanced transaction support for CRDTs and storage of large integers (with BigInt) +- Better management with new upgraded local studio and new CLI features + +## [4.2](./4.2.0) + +- New component architecture and Resource API for advanced, robust custom database application development +- Real-time capabilities through MQTT, WebSockets, and Server-Sent Events +- REST interface for intuitive, fast, and standards-compliant HTTP interaction +- Native caching capabilities for high-performance cache scenarios +- Clone node functionality + +## [4.1](./4.1.0) + +- New streaming iterators mechanism that allows query results to be delivered to clients _while_ querying results are being processed, for incredibly fast time-to-first-byte and concurrent processing/delivery +- New thread-based concurrency model for more efficient resource usage + +## [4.0](./4.0.0) + +- New clustering technology that delivers robust, resilient and high-performance replication +- Major storage improvements with highly-efficient adaptive-structure modified MessagePack format, with on-demand deserialization capabilities + +Did you know our release names are dedicated to employee pups? For our fourth release, [meet Tucker!](./tucker) diff --git a/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/tucker.md b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/tucker.md new file mode 100644 index 00000000..32574779 --- /dev/null +++ b/site/versioned_docs/version-4.6/technical-details/release-notes/v4-tucker/tucker.md @@ -0,0 +1,11 @@ +--- +title: Harper Tucker (Version 4) +--- + +# Harper Tucker (Version 4) + +Did you know our release names are dedicated to employee pups? For our fourth release, we have Tucker. + +![picture of grey and white dog](/img/v4.6/dogs/tucker.png) + +_G’day, I’m Tucker. My dad is David Cockerill, a software engineer here at Harper. I am a 3-year-old Labrador Husky mix. I love to protect my dad from all the squirrels and rabbits we have in our yard.
I have very ticklish feet and love belly rubs!_ diff --git a/site/versioned_sidebars/version-4.1-sidebars.json b/site/versioned_sidebars/version-4.1-sidebars.json new file mode 100644 index 00000000..1e3daab3 --- /dev/null +++ b/site/versioned_sidebars/version-4.1-sidebars.json @@ -0,0 +1,169 @@ +{ + "docsSidebar": [ + { + "type": "doc", + "id": "index", + "label": "Developer Documentation" + }, + { + "type": "category", + "label": "Install HarperDB", + "items": [ + { + "type": "autogenerated", + "dirName": "install-harperdb" + } + ] + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting-started" + } + ] + }, + { + "type": "link", + "label": "Full API Documentation", + "href": "https://api.harperdb.io/" + }, + { + "type": "category", + "label": "HarperDB Studio", + "items": [ + { + "type": "autogenerated", + "dirName": "harperdb-studio" + } + ] + }, + { + "type": "category", + "label": "HarperDB Cloud", + "items": [ + { + "type": "autogenerated", + "dirName": "harperdb-cloud" + } + ] + }, + { + "type": "category", + "label": "Security", + "items": [ + { + "type": "autogenerated", + "dirName": "security" + } + ] + }, + { + "type": "category", + "label": "Clustering", + "items": [ + { + "type": "autogenerated", + "dirName": "clustering" + } + ] + }, + { + "type": "category", + "label": "Custom Functions", + "items": [ + { + "type": "autogenerated", + "dirName": "custom-functions" + } + ] + }, + { + "type": "category", + "label": "Add-ons and SDKs", + "items": [ + { + "type": "autogenerated", + "dirName": "add-ons-and-sdks" + } + ] + }, + { + "type": "category", + "label": "SQL Guide", + "items": [ + { + "type": "autogenerated", + "dirName": "sql-guide" + } + ] + }, + "harperdb-cli", + "configuration", + "logging", + "transaction-logging", + "audit-logging", + "jobs", + "upgrade-hdb-instance", + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "autogenerated", + "dirName": "reference" + } + ] + }, + "support", + { + "type": "category", + "label": "Release Notes", + "items": [ + "release-notes/index", + { + "type": "category", + "label": "HarperDB Tucker (Version 4)", + "items": [ + { + "type": "autogenerated", + "dirName": "release-notes/v4-tucker" + } + ] + }, + { + "type": "category", + "label": "HarperDB Monkey (Version 3)", + "items": [ + { + "type": "autogenerated", + "dirName": "release-notes/v3-monkey" + } + ] + }, + { + "type": "category", + "label": "HarperDB Penny (Version 2)", + "items": [ + { + "type": "autogenerated", + "dirName": "release-notes/v2-penny" + } + ] + }, + { + "type": "category", + "label": "HarperDB Alby (Version 1)", + "items": [ + { + "type": "autogenerated", + "dirName": "release-notes/v1-alby" + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/site/versioned_sidebars/version-4.2-sidebars.json b/site/versioned_sidebars/version-4.2-sidebars.json new file mode 100644 index 00000000..dc492023 --- /dev/null +++ b/site/versioned_sidebars/version-4.2-sidebars.json @@ -0,0 +1,50 @@ +{ + "docsSidebar": [ + { + "type": "doc", + "id": "index", + "label": "Harper Docs" + }, + "getting-started", + { + "type": "category", + "label": "Developers", + "items": [ + { + "type": "autogenerated", + "dirName": "developers" + } + ] + }, + { + "type": "category", + "label": "Administration", + "items": [ + { + "type": "autogenerated", + "dirName": "administration" + } + ] + }, + { + "type": "category", + "label": "Deployments", + "items": [ + { + "type": "autogenerated", 
+ "dirName": "deployments" + } + ] + }, + { + "type": "category", + "label": "Technical Details", + "items": [ + { + "type": "autogenerated", + "dirName": "technical-details" + } + ] + } + ] +} \ No newline at end of file diff --git a/site/versioned_sidebars/version-4.3-sidebars.json b/site/versioned_sidebars/version-4.3-sidebars.json new file mode 100644 index 00000000..dc492023 --- /dev/null +++ b/site/versioned_sidebars/version-4.3-sidebars.json @@ -0,0 +1,50 @@ +{ + "docsSidebar": [ + { + "type": "doc", + "id": "index", + "label": "Harper Docs" + }, + "getting-started", + { + "type": "category", + "label": "Developers", + "items": [ + { + "type": "autogenerated", + "dirName": "developers" + } + ] + }, + { + "type": "category", + "label": "Administration", + "items": [ + { + "type": "autogenerated", + "dirName": "administration" + } + ] + }, + { + "type": "category", + "label": "Deployments", + "items": [ + { + "type": "autogenerated", + "dirName": "deployments" + } + ] + }, + { + "type": "category", + "label": "Technical Details", + "items": [ + { + "type": "autogenerated", + "dirName": "technical-details" + } + ] + } + ] +} \ No newline at end of file diff --git a/site/versioned_sidebars/version-4.4-sidebars.json b/site/versioned_sidebars/version-4.4-sidebars.json new file mode 100644 index 00000000..dc492023 --- /dev/null +++ b/site/versioned_sidebars/version-4.4-sidebars.json @@ -0,0 +1,50 @@ +{ + "docsSidebar": [ + { + "type": "doc", + "id": "index", + "label": "Harper Docs" + }, + "getting-started", + { + "type": "category", + "label": "Developers", + "items": [ + { + "type": "autogenerated", + "dirName": "developers" + } + ] + }, + { + "type": "category", + "label": "Administration", + "items": [ + { + "type": "autogenerated", + "dirName": "administration" + } + ] + }, + { + "type": "category", + "label": "Deployments", + "items": [ + { + "type": "autogenerated", + "dirName": "deployments" + } + ] + }, + { + "type": "category", + "label": "Technical Details", + "items": [ + { + "type": "autogenerated", + "dirName": "technical-details" + } + ] + } + ] +} \ No newline at end of file diff --git a/site/versioned_sidebars/version-4.5-sidebars.json b/site/versioned_sidebars/version-4.5-sidebars.json new file mode 100644 index 00000000..87f86c81 --- /dev/null +++ b/site/versioned_sidebars/version-4.5-sidebars.json @@ -0,0 +1,59 @@ +{ + "docsSidebar": [ + { + "type": "doc", + "id": "index", + "label": "Harper Docs" + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting-started" + } + ] + }, + { + "type": "category", + "label": "Developers", + "items": [ + { + "type": "autogenerated", + "dirName": "developers" + } + ] + }, + { + "type": "category", + "label": "Administration", + "items": [ + { + "type": "autogenerated", + "dirName": "administration" + } + ] + }, + { + "type": "category", + "label": "Deployments", + "items": [ + { + "type": "autogenerated", + "dirName": "deployments" + } + ] + }, + { + "type": "category", + "label": "Technical Details", + "items": [ + { + "type": "autogenerated", + "dirName": "technical-details" + } + ] + } + ] +} \ No newline at end of file diff --git a/site/versioned_sidebars/version-4.6-sidebars.json b/site/versioned_sidebars/version-4.6-sidebars.json new file mode 100644 index 00000000..87f86c81 --- /dev/null +++ b/site/versioned_sidebars/version-4.6-sidebars.json @@ -0,0 +1,59 @@ +{ + "docsSidebar": [ + { + "type": "doc", + "id": "index", + "label": "Harper Docs" + }, + { + 
"type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting-started" + } + ] + }, + { + "type": "category", + "label": "Developers", + "items": [ + { + "type": "autogenerated", + "dirName": "developers" + } + ] + }, + { + "type": "category", + "label": "Administration", + "items": [ + { + "type": "autogenerated", + "dirName": "administration" + } + ] + }, + { + "type": "category", + "label": "Deployments", + "items": [ + { + "type": "autogenerated", + "dirName": "deployments" + } + ] + }, + { + "type": "category", + "label": "Technical Details", + "items": [ + { + "type": "autogenerated", + "dirName": "technical-details" + } + ] + } + ] +} \ No newline at end of file diff --git a/site/versions.json b/site/versions.json new file mode 100644 index 00000000..9e930d7d --- /dev/null +++ b/site/versions.json @@ -0,0 +1,8 @@ +[ + "4.6", + "4.5", + "4.4", + "4.3", + "4.2", + "4.1" +] \ No newline at end of file